From 8378eb403f677969ffb6b783936690512e915fa5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Filip=20=C5=BBmijewski?=
Date: Fri, 17 Oct 2025 10:57:58 +0200
Subject: [PATCH] feat: add docs for model gateway

---
 src/oss/javascript/integrations/chat/ibm.mdx | 24 +++++++++++++++++
 src/oss/javascript/integrations/llms/ibm.mdx | 26 +++++++++++++++++++
 .../integrations/text_embedding/ibm.mdx      | 12 +++++++++
 3 files changed, 62 insertions(+)

diff --git a/src/oss/javascript/integrations/chat/ibm.mdx b/src/oss/javascript/integrations/chat/ibm.mdx
index 94381d7d4b..081cb4db65 100644
--- a/src/oss/javascript/integrations/chat/ibm.mdx
+++ b/src/oss/javascript/integrations/chat/ibm.mdx
@@ -148,6 +148,30 @@ Note:
 - You must provide `spaceId`, `projectId` or `idOrName` (deployment id) unless you use the lightweight engine, which works without specifying either (refer to [watsonx.ai docs](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.0.x?topic=install-choosing-installation-mode))
 - Depending on the region of your provisioned service instance, use correct serviceUrl.
 
+### Using Model Gateway
+
+```typescript
+import { ChatWatsonx } from "@langchain/community/chat_models/ibm";
+
+const props = {
+  maxTokens: 200,
+  temperature: 0.5
+};
+
+const instance = new ChatWatsonx({
+  version: "YYYY-MM-DD",
+  serviceUrl: process.env.API_URL,
+  model: "", // id of the model added to Model Gateway
+  modelGateway: true,
+  ...props
+});
+```
+
+To use Model Gateway with LangChain, you first need to create a provider and add a model via the `@ibm-cloud/watsonx-ai` SDK or the `watsonx.ai` API. Follow this documentation:
+- [API](https://cloud.ibm.com/apidocs/watsonx-ai#create-watsonxai-provider).
+- [SDK](https://ibm.github.io/watsonx-ai-node-sdk/modules/1_7_x.gateway.html).
+
 ## Invocation
 
 ```javascript
diff --git a/src/oss/javascript/integrations/llms/ibm.mdx b/src/oss/javascript/integrations/llms/ibm.mdx
index 437280555a..828e58a236 100644
--- a/src/oss/javascript/integrations/llms/ibm.mdx
+++ b/src/oss/javascript/integrations/llms/ibm.mdx
@@ -147,6 +147,32 @@ Note:
 - Depending on the region of your provisioned service instance, use correct serviceUrl.
 - You need to specify the model you want to use for inferencing through model_id.
 
+### Using Model Gateway
+
+```typescript
+import { WatsonxLLM } from "@langchain/community/llms/ibm";
+
+const props = {
+  decoding_method: "sample",
+  maxNewTokens: 100,
+  minNewTokens: 1,
+  temperature: 0.5,
+  topK: 50,
+  topP: 1,
+};
+
+const instance = new WatsonxLLM({
+  version: "YYYY-MM-DD",
+  serviceUrl: process.env.API_URL,
+  model: "", // id of the model added to Model Gateway
+  modelGateway: true,
+  ...props,
+});
+```
+
+To use Model Gateway with LangChain, you first need to create a provider and add a model via the `@ibm-cloud/watsonx-ai` SDK or the `watsonx.ai` API. Follow this documentation:
+- [API](https://cloud.ibm.com/apidocs/watsonx-ai#create-watsonxai-provider).
+- [SDK](https://ibm.github.io/watsonx-ai-node-sdk/modules/1_7_x.gateway.html).
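+
+Once the provider and model are registered, a gateway-backed instance is called like any other `WatsonxLLM`. A minimal sketch (the prompt is illustrative):
+
+```typescript
+// Assumes `instance` is the gateway-backed WatsonxLLM configured above.
+const result = await instance.invoke("Print hello world.");
+console.log(result);
+```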
+
 
 ## Invocation and generation
 
 ```javascript
diff --git a/src/oss/javascript/integrations/text_embedding/ibm.mdx b/src/oss/javascript/integrations/text_embedding/ibm.mdx
index 10c2ebffe8..19f042ed14 100644
--- a/src/oss/javascript/integrations/text_embedding/ibm.mdx
+++ b/src/oss/javascript/integrations/text_embedding/ibm.mdx
@@ -136,6 +136,18 @@ Note:
 - You must provide `spaceId` or `projectId` in order to proceed.
 - Depending on the region of your provisioned service instance, use correct serviceUrl.
 
+### Using Model Gateway
+
+```typescript
+import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm";
+
+const instance = new WatsonxEmbeddings({
+  version: "YYYY-MM-DD",
+  serviceUrl: process.env.API_URL,
+  model: "", // id of the model added to Model Gateway
+  modelGateway: true,
+});
+```
+
+To use Model Gateway with LangChain, you first need to create a provider and add a model via the `@ibm-cloud/watsonx-ai` SDK or the `watsonx.ai` API. Follow this documentation:
+- [API](https://cloud.ibm.com/apidocs/watsonx-ai#create-watsonxai-provider).
+- [SDK](https://ibm.github.io/watsonx-ai-node-sdk/modules/1_7_x.gateway.html).
+
 ## Indexing and Retrieval
 
 Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [**Learn** tab](/oss/learn/).
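A gateway-backed `WatsonxEmbeddings` instance is then used through the standard embeddings interface. A minimal sketch (the query text is illustrative):

```typescript
// Assumes `instance` is the gateway-backed WatsonxEmbeddings configured above.
const vector = await instance.embedQuery("What is Model Gateway?");
console.log(vector.length); // dimensionality of the returned embedding
```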