diff --git a/docs/weaviate/model-providers/_includes/integration_contextualai_rag.png b/docs/weaviate/model-providers/_includes/integration_contextualai_rag.png
new file mode 100644
index 00000000..8b6ab15a
Binary files /dev/null and b/docs/weaviate/model-providers/_includes/integration_contextualai_rag.png differ
diff --git a/docs/weaviate/model-providers/_includes/integration_contextualai_rag_grouped.png b/docs/weaviate/model-providers/_includes/integration_contextualai_rag_grouped.png
new file mode 100644
index 00000000..8b6ab15a
Binary files /dev/null and b/docs/weaviate/model-providers/_includes/integration_contextualai_rag_grouped.png differ
diff --git a/docs/weaviate/model-providers/_includes/integration_contextualai_rag_single.png b/docs/weaviate/model-providers/_includes/integration_contextualai_rag_single.png
new file mode 100644
index 00000000..8b6ab15a
Binary files /dev/null and b/docs/weaviate/model-providers/_includes/integration_contextualai_rag_single.png differ
diff --git a/docs/weaviate/model-providers/_includes/integration_contextualai_reranker.png b/docs/weaviate/model-providers/_includes/integration_contextualai_reranker.png
new file mode 100644
index 00000000..2f573d7a
Binary files /dev/null and b/docs/weaviate/model-providers/_includes/integration_contextualai_reranker.png differ
diff --git a/docs/weaviate/model-providers/_includes/provider.connect.py b/docs/weaviate/model-providers/_includes/provider.connect.py
index c6a03fc5..27cc262f 100644
--- a/docs/weaviate/model-providers/_includes/provider.connect.py
+++ b/docs/weaviate/model-providers/_includes/provider.connect.py
@@ -25,6 +25,10 @@
# Recommended: save sensitive data as environment variables
cohere_key = os.getenv("COHERE_APIKEY")
# END CohereInstantiation
+# START ContextualAIInstantiation
+# Recommended: save sensitive data as environment variables
+contextualai_key = os.getenv("CONTEXTUAL_API_KEY")
+# END ContextualAIInstantiation
# START DatabricksInstantiation
# Recommended: save sensitive data as environment variables
databricks_token = os.getenv("DATABRICKS_TOKEN")
@@ -143,6 +147,10 @@
"X-Xai-Api-Key": xai_key,
# END XaiInstantiation
+# START ContextualAIInstantiation
+ "X-Contextual-Api-Key": contextualai_key,
+# END ContextualAIInstantiation
+
# START-ANY
}
# highlight-end
diff --git a/docs/weaviate/model-providers/_includes/provider.connect.ts b/docs/weaviate/model-providers/_includes/provider.connect.ts
index 959656e8..d4e2e7ed 100644
--- a/docs/weaviate/model-providers/_includes/provider.connect.ts
+++ b/docs/weaviate/model-providers/_includes/provider.connect.ts
@@ -16,6 +16,9 @@ const aws_secret_key = process.env.AWS_SECRET_KEY || ''; // Replace with your A
// START CohereInstantiation
const cohereApiKey = process.env.COHERE_APIKEY || ''; // Replace with your inference API key
// END CohereInstantiation
+// START ContextualAIInstantiation
+const contextualApiKey = process.env.CONTEXTUAL_API_KEY || '';
+// END ContextualAIInstantiation
// START DatabricksInstantiation
const databricksToken = process.env.DATABRICKS_TOKEN || ''; // Replace with your inference API key
// END DatabricksInstantiation
@@ -119,6 +122,9 @@ const client = await weaviate.connectToWeaviateCloud(
// START XaiInstantiation
'X-Xai-Api-Key': xaiApiKey,
// END XaiInstantiation
+ // START ContextualAIInstantiation
+ 'X-Contextual-Api-Key': contextualApiKey,
+ // END ContextualAIInstantiation
// START-ANY
}
// highlight-end
diff --git a/docs/weaviate/model-providers/_includes/provider.generative.py b/docs/weaviate/model-providers/_includes/provider.generative.py
index 427684b4..1d85d8c5 100644
--- a/docs/weaviate/model-providers/_includes/provider.generative.py
+++ b/docs/weaviate/model-providers/_includes/provider.generative.py
@@ -46,6 +46,86 @@ def import_data():
# clean up
client.collections.delete("DemoCollection")
+# START BasicGenerativeContextualAI
+from weaviate.classes.config import Configure
+
+client.collections.create(
+ "DemoCollection",
+ # highlight-start
+ generative_config=Configure.Generative.contextualai()
+ # highlight-end
+ # Additional parameters not shown
+)
+# END BasicGenerativeContextualAI
+
+# clean up
+client.collections.delete("DemoCollection")
+
+# START GenerativeContextualAICustomModel
+from weaviate.classes.config import Configure
+
+client.collections.create(
+ "DemoCollection",
+ # highlight-start
+ generative_config=Configure.Generative.contextualai(
+ model="v2"
+ )
+ # highlight-end
+ # Additional parameters not shown
+)
+# END GenerativeContextualAICustomModel
+
+# clean up
+client.collections.delete("DemoCollection")
+
+# START FullGenerativeContextualAI
+from weaviate.classes.config import Configure
+
+client.collections.create(
+ "DemoCollection",
+ # highlight-start
+ generative_config=Configure.Generative.contextualai(
+ # # These parameters are optional
+ # model="v2",
+ # max_tokens=1024,
+ # temperature=0.7,
+ # top_p=0.9,
+ # system_prompt="You are a helpful assistant",
+ # avoid_commentary=True,
+ )
+ # highlight-end
+ # Additional parameters not shown
+)
+# END FullGenerativeContextualAI
+
+# clean up
+client.collections.delete("DemoCollection")
+import_data()
+
+# START RuntimeModelSelectionContextualAI
+from weaviate.classes.config import Configure
+from weaviate.classes.generate import GenerativeConfig
+
+collection = client.collections.use("DemoCollection")
+response = collection.generate.near_text(
+ query="A holiday film",
+ limit=2,
+ grouped_task="Write a tweet promoting these two movies",
+ # highlight-start
+ generative_provider=GenerativeConfig.contextualai(
+ # # These parameters are optional
+ # model="v2",
+ # max_tokens=1024,
+ # temperature=0.7,
+ # top_p=0.9,
+ # system_prompt="You are a helpful assistant",
+ # avoid_commentary=True,
+ ),
+ # Additional parameters not shown
+ # highlight-end
+)
+# END RuntimeModelSelectionContextualAI
+
# START BasicGenerativeAnthropic
from weaviate.classes.config import Configure
diff --git a/docs/weaviate/model-providers/_includes/provider.generative.ts b/docs/weaviate/model-providers/_includes/provider.generative.ts
index 16789a17..b902082d 100644
--- a/docs/weaviate/model-providers/_includes/provider.generative.ts
+++ b/docs/weaviate/model-providers/_includes/provider.generative.ts
@@ -38,6 +38,55 @@ async function main() {
// Clean up
await client.collections.delete('DemoCollection');
+// START BasicGenerativeContextualAI
+await client.collections.create({
+ name: 'DemoCollection',
+ // highlight-start
+ generative: weaviate.configure.generative.contextualai(),
+ // highlight-end
+ // Additional parameters not shown
+});
+// END BasicGenerativeContextualAI
+
+// Clean up
+await client.collections.delete('DemoCollection');
+
+// START GenerativeContextualAICustomModel
+await client.collections.create({
+ name: 'DemoCollection',
+ // highlight-start
+ generative: weaviate.configure.generative.contextualai({
+ model: 'v2'
+ }),
+ // highlight-end
+ // Additional parameters not shown
+});
+// END GenerativeContextualAICustomModel
+
+// Clean up
+await client.collections.delete('DemoCollection');
+
+// START FullGenerativeContextualAI
+await client.collections.create({
+ name: 'DemoCollection',
+ // highlight-start
+ generative: weaviate.configure.generative.contextualai({
+ // These parameters are optional
+ // model: 'v2',
+ // maxTokens: 1024,
+ // temperature: 0.7,
+ // topP: 0.9,
+ // systemPrompt: 'You are a helpful assistant',
+ // avoidCommentary: true,
+ }),
+ // highlight-end
+ // Additional parameters not shown
+});
+// END FullGenerativeContextualAI
+
+// Clean up
+await client.collections.delete('DemoCollection');
+
// START BasicGenerativeAnthropic
await client.collections.create({
name: 'DemoCollection',
@@ -117,6 +166,27 @@ response = await myCollection.generate.nearText("A holiday film", {
// Additional parameters not shown
);
// END RuntimeModelSelectionAnthropic
+
+// START RuntimeModelSelectionContextualAI
+response = await myCollection.generate.nearText('A holiday film', {
+ // highlight-start
+ groupedTask: 'Write a tweet promoting these two movies',
+ config: generativeParameters.contextualai({
+ // These parameters are optional
+ // model: 'v2',
+ // maxTokens: 1024,
+ // temperature: 0.7,
+ // topP: 0.9,
+ // systemPrompt: 'You are a helpful assistant',
+ // avoidCommentary: true,
+ }),
+ // highlight-end
+}, {
+ limit: 2,
+}
+ // Additional parameters not shown
+);
+// END RuntimeModelSelectionContextualAI
(async () => {
// START WorkingWithImagesAnthropic
const srcImgPath = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b0/Winter_forest_silver.jpg/960px-Winter_forest_silver.jpg"
diff --git a/docs/weaviate/model-providers/_includes/provider.reranker.py b/docs/weaviate/model-providers/_includes/provider.reranker.py
index e8faa923..22510145 100644
--- a/docs/weaviate/model-providers/_includes/provider.reranker.py
+++ b/docs/weaviate/model-providers/_includes/provider.reranker.py
@@ -28,6 +28,40 @@
# Clean up
client.collections.delete("DemoCollection")
+# START RerankerContextualAIBasic
+from weaviate.classes.config import Configure
+
+client.collections.create(
+ "DemoCollection",
+ # highlight-start
+ reranker_config=Configure.Reranker.contextualai()
+ # highlight-end
+ # Additional parameters not shown
+)
+# END RerankerContextualAIBasic
+
+# Clean up
+client.collections.delete("DemoCollection")
+
+# START RerankerContextualAICustomModel
+from weaviate.classes.config import Configure
+
+client.collections.create(
+ "DemoCollection",
+ # highlight-start
+ reranker_config=Configure.Reranker.contextualai(
+ model="ctxl-rerank-v2-instruct-multilingual",
+ instruction="Prioritize internal sales documents over market analysis reports. More recent documents should be weighted higher.",
+ top_n=5
+ )
+ # highlight-end
+ # Additional parameters not shown
+)
+# END RerankerContextualAICustomModel
+
+# Clean up
+client.collections.delete("DemoCollection")
+
# START RerankerCohereBasic
from weaviate.classes.config import Configure
diff --git a/docs/weaviate/model-providers/_includes/provider.reranker.ts b/docs/weaviate/model-providers/_includes/provider.reranker.ts
index 0a2cc29b..3a9c4781 100644
--- a/docs/weaviate/model-providers/_includes/provider.reranker.ts
+++ b/docs/weaviate/model-providers/_includes/provider.reranker.ts
@@ -41,6 +41,28 @@ await client.collections.create({
});
// END RerankerCohereCustomModel
+// START RerankerContextualAIBasic
+await client.collections.create({
+ name: 'DemoCollection',
+ // highlight-start
+ reranker: weaviate.configure.reranker.contextualai(),
+ // highlight-end
+});
+// END RerankerContextualAIBasic
+
+// START RerankerContextualAICustomModel
+await client.collections.create({
+ name: 'DemoCollection',
+ // highlight-start
+ reranker: weaviate.configure.reranker.contextualai({
+ model: 'ctxl-rerank-v2-instruct-multilingual',
+ instruction: 'Prioritize internal sales documents over market analysis reports. More recent documents should be weighted higher.',
+ topN: 5,
+ }),
+ // highlight-end
+});
+// END RerankerContextualAICustomModel
+
// START RerankerJinaAIBasic
await client.collections.create({
name: 'DemoCollection',
diff --git a/docs/weaviate/model-providers/contextualai/_category_.json b/docs/weaviate/model-providers/contextualai/_category_.json
new file mode 100644
index 00000000..63938e5f
--- /dev/null
+++ b/docs/weaviate/model-providers/contextualai/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Contextual AI",
+ "position": 140
+}
+
diff --git a/docs/weaviate/model-providers/contextualai/generative.md b/docs/weaviate/model-providers/contextualai/generative.md
new file mode 100644
index 00000000..9337c77e
--- /dev/null
+++ b/docs/weaviate/model-providers/contextualai/generative.md
@@ -0,0 +1,269 @@
+---
+title: Generative AI
+description: Contextual AI Generative Model Provider
+sidebar_position: 50
+image: og/docs/integrations/provider_integrations_contextualai.jpg
+tags: ['model providers', 'contextual ai', 'generative', 'rag']
+---
+
+# Contextual AI's Generative AI with Weaviate
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import FilteredTextBlock from '@site/src/components/Documentation/FilteredTextBlock';
+import PyConnect from '!!raw-loader!../_includes/provider.connect.py';
+import TSConnect from '!!raw-loader!../_includes/provider.connect.ts';
+import PyCode from '!!raw-loader!../_includes/provider.generative.py';
+import TSCode from '!!raw-loader!../_includes/provider.generative.ts';
+
+Contextual AI allows you to access their [Grounded Language Model (GLM)](https://contextual.ai/blog/introducing-grounded-language-model?utm_campaign=GLM-integration&utm_source=weaviate&utm_medium=github&utm_content=repo) directly from Weaviate.
+
+[Configure a Weaviate collection](#configure-collection) to use a generative AI model with Contextual AI. Weaviate will perform retrieval augmented generation (RAG) using the specified model and your Contextual AI API key.
+
+[Configure collection](#configure-collection) • [Select a model](#select-a-model) • [Parameters](#generative-parameters) • [Runtime selection](#select-a-model-at-runtime) • [Headers](#header-parameters) • [RAG](#retrieval-augmented-generation)
+
+More specifically, Weaviate will perform a search, retrieve the most relevant objects, and then pass them to the Contextual AI generative model to generate outputs.
+
+
+
+## Requirements
+
+### Weaviate configuration
+
+Your Weaviate instance must be configured with the Contextual AI generative integration (`generative-contextualai`) module.
+
+
+ For Weaviate Cloud (WCD) users
+
+This integration is enabled by default on Weaviate Cloud (WCD) serverless instances.
+
+
+
+
+ For self-hosted users
+
+- Check the [cluster metadata](/deploy/configuration/meta.md) to verify if the module is enabled.
+- Follow the [how-to configure modules](../../configuration/modules.md) guide to enable the module in Weaviate.
+
+
+
+### API credentials
+
+Provide a Contextual AI API key to Weaviate. Sign up at `https://contextual.ai/`.
+
+Provide the API key using one of the following methods:
+
+- Set the `CONTEXTUAL_API_KEY` environment variable available to Weaviate.
+- Provide the API key at runtime, either via request headers or when instantiating the client (see the examples below).
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Configure collection
+
+import MutableGenerativeConfig from '/_includes/mutable-generative-config.md';
+
+
+
+Configure a Weaviate collection to use Contextual AI as follows:
+
+
+
+
+
+
+
+
+
+
+
+
+### Select a model
+
+You can specify which model version to use: `v1` or `v2` (defaults to `v2`).
+
+
+
+
+
+
+
+
+
+
+
+
+### Generative parameters
+
+Supported parameters include:
+
+- `model`: The version of Contextual AI's GLM to use. Currently, `v1` and `v2` are available (defaults to `v2`)
+- `temperature`: The sampling temperature, which affects the randomness in the response. Note that higher temperature values can reduce groundedness. Range: 0 ≤ x ≤ 1 (defaults to 0)
+- `topP`: A parameter for nucleus sampling, an alternative to temperature which also affects the randomness of the response. Note that higher top_p values can reduce groundedness. Range: 0 < x ≤ 1 (defaults to 0.9)
+- `maxTokens`: The maximum number of tokens that the model can generate in the response. Range: 1 ≤ x ≤ 2048 (defaults to 1024)
+- `systemPrompt`: Instructions that the model follows when generating responses. Note that we do not guarantee that the model follows these instructions exactly (optional)
+- `avoidCommentary`: Flag to indicate whether the model should avoid providing additional commentary in responses. Commentary is conversational in nature and does not contain verifiable claims; therefore, commentary is not strictly grounded in available context. However, commentary may provide useful context which improves the helpfulness of responses (defaults to `false`)
+
+An additional `knowledge` array can be provided at runtime for RAG scenarios. These knowledge sources — the documents the model may draw on when generating a response — are required for proper grounded generation.
+
+
+
+
+
+
+
+
+
+
+
+
+## Select a model at runtime
+
+You can override the default provider at query time.
+
+
+
+
+
+
+
+
+
+
+## Header parameters
+
+Runtime headers:
+
+- `X-Contextual-Api-Key`: Contextual AI API key.
+- `X-Contextual-Baseurl`: Optional base URL (e.g., a proxy). This is the only way to customize the API endpoint URL.
+
+Any additional headers provided at runtime override existing configuration.
+
+## Retrieval augmented generation
+
+Use either the single prompt or grouped task method.
+
+### Single prompt
+
+
+
+To generate text for each object in the search results, use the single prompt method.
+
+
+
+
+
+
+
+
+
+
+### Grouped task
+
+Generates a single output for the entire result set.
+
+
+
+
+
+
+
+
+
+
+
+
+## Further resources
+
+- Contextual AI API docs: `https://docs.contextual.ai/user-guides/beginner-guide`
+- [Contextual AI Reranker Models + Weaviate](./reranker.md)
+- [Introducing the most grounded language model (GLM)](https://contextual.ai/blog/introducing-grounded-language-model?utm_campaign=GLM-integration&utm_source=weaviate&utm_medium=github&utm_content=repo)
+- [How-to: Manage collections](../../manage-collections/index.mdx)
+- [How-to: Query & Search](../../search/index.mdx)
+
+## Questions and feedback
+
+import DocsFeedback from '/_includes/docs-feedback.mdx';
+
+
+
+
diff --git a/docs/weaviate/model-providers/contextualai/index.md b/docs/weaviate/model-providers/contextualai/index.md
new file mode 100644
index 00000000..545c095d
--- /dev/null
+++ b/docs/weaviate/model-providers/contextualai/index.md
@@ -0,0 +1,57 @@
+---
+title: Contextual AI + Weaviate
+sidebar_position: 10
+image: og/docs/integrations/provider_integrations_contextualai.jpg
+tags: ['model providers', 'contextual ai']
+---
+
+
+
+Contextual AI offers powerful models for natural language processing and generation. Weaviate seamlessly integrates with [Contextual AI's APIs](https://contextual.ai/), allowing users to leverage their [Grounded Language Model (GLM)](https://contextual.ai/blog/introducing-grounded-language-model?utm_campaign=GLM-integration&utm_source=weaviate&utm_medium=github&utm_content=repo) and [reranker models](https://contextual.ai/blog/rerank-v2/?utm_campaign=contextual-ai-integration&utm_source=weaviate&utm_medium=github&utm_content=repo) directly from the Weaviate Database.
+
+These integrations empower developers to build sophisticated AI-driven applications with ease.
+
+## Integrations with Contextual AI
+
+### Generative AI models for RAG
+
+
+
+Contextual AI's [Grounded Language Model (GLM)](https://contextual.ai/blog/introducing-grounded-language-model?utm_campaign=GLM-integration&utm_source=weaviate&utm_medium=github&utm_content=repo) can generate human-like text based on given prompts and contexts, prioritizing faithfulness to in-context retrievals to reduce hallucinations.
+
+[Weaviate's generative AI integration](./generative.md) enables users to perform retrieval augmented generation (RAG) directly from the Weaviate Database. This combines Weaviate's efficient storage and fast retrieval capabilities with Contextual AI's generative AI models to generate personalized and context-aware responses.
+
+[Contextual AI generative AI integration page](./generative.md)
+
+### Reranker models
+
+
+
+Contextual AI's [reranker models](https://contextual.ai/blog/rerank-v2/?utm_campaign=contextual-ai-integration&utm_source=weaviate&utm_medium=github&utm_content=repo) are designed to improve the relevance and ranking of search results using custom instructions.
+
+[The Weaviate reranker integration](./reranker.md) allows users to easily refine their search results by leveraging Contextual AI's reranker models.
+
+[Contextual AI reranker integration page](./reranker.md)
+
+## Summary
+
+These integrations enable developers to leverage Contextual AI's powerful models directly within Weaviate.
+
+In turn, they simplify the process of building AI-driven applications to speed up your development process, so that you can focus on creating innovative solutions.
+
+## Get started
+
+You must provide a valid Contextual AI API key to Weaviate for these integrations. Go to [Contextual AI](https://contextual.ai/) to sign up and obtain an API key.
+
+Then, go to the relevant integration page to learn how to configure Weaviate with the Contextual AI models and start using them in your applications.
+
+- [Generative AI](./generative.md)
+- [Reranker](./reranker.md)
+
+## Questions and feedback
+
+import DocsFeedback from '/_includes/docs-feedback.mdx';
+
+
+
+
diff --git a/docs/weaviate/model-providers/contextualai/reranker.md b/docs/weaviate/model-providers/contextualai/reranker.md
new file mode 100644
index 00000000..13e6eb3b
--- /dev/null
+++ b/docs/weaviate/model-providers/contextualai/reranker.md
@@ -0,0 +1,187 @@
+---
+title: Reranker
+description: Contextual AI Reranker Model Provider
+sidebar_position: 70
+image: og/docs/integrations/provider_integrations_contextualai.jpg
+tags: ['model providers', 'contextual ai', 'reranker']
+---
+
+# Contextual AI Reranker Models with Weaviate
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import FilteredTextBlock from '@site/src/components/Documentation/FilteredTextBlock';
+import PyConnect from '!!raw-loader!../_includes/provider.connect.py';
+import TSConnect from '!!raw-loader!../_includes/provider.connect.ts';
+import PyCode from '!!raw-loader!../_includes/provider.reranker.py';
+import TSCode from '!!raw-loader!../_includes/provider.reranker.ts';
+
+Weaviate's integration with [Contextual AI rerankers](https://contextual.ai/blog/rerank-v2/?utm_campaign=contextual-ai-integration&utm_source=weaviate&utm_medium=github&utm_content=repo) allows you to rerank search results using their models with custom instructions for recency, document type, source, and metadata.
+
+
+
+## Requirements
+
+### Weaviate configuration
+
+Your Weaviate instance must be configured with the Contextual AI reranker integration (`reranker-contextualai`) module.
+
+
+ For Weaviate Cloud (WCD) users
+
+This integration is enabled by default on Weaviate Cloud (WCD) serverless instances.
+
+
+
+
+ For self-hosted users
+
+- Check the [cluster metadata](/deploy/configuration/meta.md) to verify if the module is enabled.
+- Follow the [how-to configure modules](../../configuration/modules.md) guide to enable the module in Weaviate.
+
+
+
+### API credentials
+
+Provide a Contextual AI API key to Weaviate. Sign up at `https://contextual.ai/`.
+
+Provide the API key using one of the following methods:
+
+- Set the `CONTEXTUAL_API_KEY` environment variable available to Weaviate.
+- Provide the API key at runtime, either via request headers or when instantiating the client (see the examples below).
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Configure the reranker
+
+import MutableRerankerConfig from '/_includes/mutable-reranker-config.md';
+
+
+
+Configure a collection to use Contextual AI reranker:
+
+
+
+
+
+
+
+
+
+
+
+
+### Select a model
+
+Available models include:
+
+- `ctxl-rerank-v2-instruct-multilingual` (default)
+- `ctxl-rerank-v2-instruct-multilingual-mini`
+- `ctxl-rerank-v1-instruct`
+
+
+### Advanced parameter configuration
+
+You can configure additional parameters for fine-tuned reranking behavior:
+
+- `model`: The version of the reranker to use. Currently, `ctxl-rerank-v2-instruct-multilingual`, `ctxl-rerank-v2-instruct-multilingual-mini`, and `ctxl-rerank-v1-instruct` are available (defaults to `ctxl-rerank-v2-instruct-multilingual`)
+- `instruction`: Instructions that the reranker references when ranking documents, after considering relevance. We evaluated the model on instructions for recency, document type, source, and metadata, and it can generalize to other instructions as well. For instructions related to recency and timeframe, specify the timeframe (e.g., instead of saying "this year") because the reranker doesn't know the current date. Example: "Prioritize internal sales documents over market analysis reports. More recent documents should be weighted higher. Enterprise portal content supersedes distributor communications." (optional)
+- `topN`: The number of top-ranked results to return (optional)
+
+
+
+
+
+
+
+
+
+
+
+
+## Header parameters
+
+Runtime headers:
+
+- `X-Contextual-Api-Key`: Contextual AI API key.
+
+## Reranking query
+
+Any search can be combined with a reranker.
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Further resources
+
+- [Contextual AI Reranker API docs](https://docs.contextual.ai/api-reference/rerank/rerank)
+- [Introducing the instruction-following reranker](https://contextual.ai/blog/introducing-instruction-following-reranker)
+- [Contextual AI Reranker v2 Blog Post](https://contextual.ai/blog/rerank-v2/?utm_campaign=contextual-ai-integration&utm_source=weaviate&utm_medium=github&utm_content=repo)
+- [Contextual AI Generative AI + Weaviate](./generative.md)
+
+## Questions and feedback
+
+import DocsFeedback from '/_includes/docs-feedback.mdx';
+
+
+
+
diff --git a/docs/weaviate/model-providers/index.md b/docs/weaviate/model-providers/index.md
index f81647f8..acd80813 100644
--- a/docs/weaviate/model-providers/index.md
+++ b/docs/weaviate/model-providers/index.md
@@ -21,6 +21,7 @@ This enables an enhanced developed experience, such as the ability to:
| [Anyscale](./anyscale/index.md) | - | [Text](./anyscale/generative.md) | - |
| [AWS](./aws/index.md) | [Text](./aws/embeddings.md) | [Text](./aws/generative.md) |
| [Cohere](./cohere/index.md) | [Text](./cohere/embeddings.md), [Multimodal](./cohere/embeddings-multimodal.md) | [Text](./cohere/generative.md) | [Reranker](./cohere/reranker.md) |
+| [Contextual AI](./contextualai/index.md) | - | [Text](./contextualai/generative.md) | [Reranker](./contextualai/reranker.md) |
| [Databricks](./databricks/index.md) | [Text](./databricks/embeddings.md) | [Text](./databricks/generative.md) | - |
| [FriendliAI](./friendliai/index.md) | - | [Text](./friendliai/generative.md) | - |
| [Google](./google/index.md) | [Text](./google/embeddings.md), [Multimodal](./google/embeddings-multimodal.md) | [Text](./google/generative.md) | - |
diff --git a/sidebars.js b/sidebars.js
index 446e4244..97a9305c 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -165,6 +165,19 @@ const sidebars = {
"weaviate/model-providers/cohere/reranker",
],
},
+ {
+ type: "category",
+ label: "Contextual AI",
+ className: "sidebar-item",
+ link: {
+ type: "doc",
+ id: "weaviate/model-providers/contextualai/index",
+ },
+ items: [
+ "weaviate/model-providers/contextualai/generative",
+ "weaviate/model-providers/contextualai/reranker",
+ ],
+ },
{
type: "category",
label: "Databricks",