[AUTO-GENERATED] Add JSDoc examples to classes. #3325

Merged · 4 commits · Nov 18, 2023
24 changes: 24 additions & 0 deletions langchain/src/agents/agent.ts
@@ -214,6 +214,30 @@ export interface LLMSingleActionAgentInput {
 * Class representing a single action agent using an LLMChain in LangChain.
* Extends the BaseSingleActionAgent class and provides methods for
* planning agent actions based on LLMChain outputs.
* @example
* ```typescript
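 * // CustomPromptTemplate and CustomOutputParser are user-defined classes
 * // (not exported by LangChain), shown here for illustration only.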
* const customPromptTemplate = new CustomPromptTemplate({
* tools: [new Calculator()],
* inputVariables: ["input", "agent_scratchpad"],
* });
* const customOutputParser = new CustomOutputParser();
* const agent = new LLMSingleActionAgent({
* llmChain: new LLMChain({
* prompt: customPromptTemplate,
* llm: new ChatOpenAI({ temperature: 0 }),
* }),
* outputParser: customOutputParser,
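 *   // Stop generation before the model emits a hallucinated tool observation.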
* stop: ["\nObservation"],
* });
* const executor = new AgentExecutor({
* agent,
* tools: [new Calculator()],
* });
* const result = await executor.invoke({
* input:
* "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
* });
* ```
*/
export class LLMSingleActionAgent extends BaseSingleActionAgent {
lc_namespace = ["langchain", "agents"];
13 changes: 13 additions & 0 deletions langchain/src/cache/cloudflare_kv.ts
@@ -11,6 +11,19 @@ import {
* Represents a specific implementation of a caching mechanism using Cloudflare KV
* as the underlying storage system. It extends the `BaseCache` class and
* overrides its methods to provide the Cloudflare KV-specific logic.
* @example
* ```typescript
 * // Example of using ChatAnthropic with Cloudflare KV as cache in a Cloudflare Worker
* const cache = new CloudflareKVCache(env.KV_NAMESPACE);
* const model = new ChatAnthropic({
* cache,
* });
* const response = await model.invoke("How are you today?");
* return new Response(JSON.stringify(response), {
* headers: { "content-type": "application/json" },
* });
* ```
*/
export class CloudflareKVCache extends BaseCache {
private binding: KVNamespace;
19 changes: 19 additions & 0 deletions langchain/src/cache/momento.ts
@@ -42,6 +42,25 @@ export interface MomentoCacheProps {
/**
* A cache that uses Momento as the backing store.
* See https://gomomento.com.
* @example
* ```typescript
* const cache = new MomentoCache({
* client: new CacheClient({
* configuration: Configurations.Laptop.v1(),
* credentialProvider: CredentialProvider.fromEnvironmentVariable({
* environmentVariableName: "MOMENTO_API_KEY",
* }),
* defaultTtlSeconds: 60 * 60 * 24, // Cache TTL set to 24 hours.
* }),
* cacheName: "langchain",
* });
* // Initialize the OpenAI model with Momento cache for caching responses
* const model = new ChatOpenAI({
* cache,
* });
* await model.invoke("How are you today?");
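 * // "llmKey" stands in for the serialized LLM key under which the generation was cached.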
* const cachedValues = await cache.lookup("How are you today?", "llmKey");
* ```
*/
export class MomentoCache extends BaseCache {
private client: ICacheClient;
15 changes: 15 additions & 0 deletions langchain/src/cache/upstash_redis.ts
@@ -21,6 +21,21 @@ export type UpstashRedisCacheProps = {
/**
* A cache that uses Upstash as the backing store.
* See https://docs.upstash.com/redis.
* @example
* ```typescript
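 * // Replace the placeholder values below with your Upstash REST URL and token.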
* const cache = new UpstashRedisCache({
* config: {
* url: "UPSTASH_REDIS_REST_URL",
* token: "UPSTASH_REDIS_REST_TOKEN",
* },
* });
* // Initialize the OpenAI model with Upstash Redis cache for caching responses
* const model = new ChatOpenAI({
* cache,
* });
* await model.invoke("How are you today?");
* const cachedValues = await cache.lookup("How are you today?", "llmKey");
* ```
*/
export class UpstashRedisCache extends BaseCache {
private redisClient: Redis;
29 changes: 29 additions & 0 deletions langchain/src/chains/conversational_retrieval_chain.ts
@@ -39,6 +39,35 @@ export interface ConversationalRetrievalQAChainInput extends ChainInputs {
* Class for conducting conversational question-answering tasks with a
* retrieval component. Extends the BaseChain class and implements the
* ConversationalRetrievalQAChainInput interface.
* @example
* ```typescript
* const model = new ChatAnthropic({});
*
* const text = fs.readFileSync("state_of_the_union.txt", "utf8");
*
* const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
* const docs = await textSplitter.createDocuments([text]);
*
* const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
*
* const chain = ConversationalRetrievalQAChain.fromLLM(
* model,
* vectorStore.asRetriever(),
* );
*
* const question = "What did the president say about Justice Breyer?";
*
* const res = await chain.call({ question, chat_history: "" });
* console.log(res);
*
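 * // chat_history is passed as a plain string here; an array of messages is also supported.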
* const chatHistory = `${question}\n${res.text}`;
* const followUpRes = await chain.call({
* question: "Was that nice?",
* chat_history: chatHistory,
* });
* console.log(followUpRes);
* ```
*/
export class ConversationalRetrievalQAChain
extends BaseChain
21 changes: 21 additions & 0 deletions langchain/src/chains/retrieval_qa.ts
@@ -25,6 +25,27 @@ export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
/**
* Class representing a chain for performing question-answering tasks with
* a retrieval component.
* @example
* ```typescript
 * // Initialize the OpenAI chat model and a remote retriever (placeholder URL and auth values shown)
* const model = new ChatOpenAI({});
* const retriever = new RemoteLangChainRetriever({
* url: "http://example.com/api",
* auth: { bearer: "foo" },
* inputKey: "message",
* responseKey: "response",
* });
*
* // Create a RetrievalQAChain using the model and retriever
* const chain = RetrievalQAChain.fromLLM(model, retriever);
*
* // Execute the chain with a query and log the result
* const res = await chain.call({
* query: "What did the president say about Justice Breyer?",
* });
* console.log({ res });
* ```
*/
export class RetrievalQAChain
extends BaseChain
17 changes: 17 additions & 0 deletions langchain/src/chat_models/llama_cpp.ts
@@ -33,6 +33,23 @@ export interface LlamaCppCallOptions extends BaseLanguageModelCallOptions {
 * This can be installed using `npm install -S node-llama-cpp`; the minimum
 * supported version is 2.0.0.
 * It also requires that you have a locally built version of Llama2 installed.
* @example
* ```typescript
* // Initialize the ChatLlamaCpp model with the path to the model binary file.
* const model = new ChatLlamaCpp({
* modelPath: "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin",
* temperature: 0.5,
* });
*
* // Call the model with a message and await the response.
* const response = await model.call([
* new HumanMessage({ content: "My name is John." }),
* ]);
*
* // Log the response to the console.
* console.log({ response });
* ```
*/
export class ChatLlamaCpp extends SimpleChatModel<LlamaCppCallOptions> {
declare CallOptions: LlamaCppCallOptions;
27 changes: 27 additions & 0 deletions langchain/src/chat_models/minimax.ts
@@ -271,6 +271,33 @@ export interface ChatMinimaxCallOptions extends BaseFunctionCallOptions {
*
 * To use, you should have the `MINIMAX_GROUP_ID` and `MINIMAX_API_KEY`
 * environment variables set.
* @example
* ```typescript
* // Define a chat prompt with a system message setting the context for translation
* const chatPrompt = ChatPromptTemplate.fromMessages([
* SystemMessagePromptTemplate.fromTemplate(
* "You are a helpful assistant that translates {input_language} to {output_language}.",
* ),
* HumanMessagePromptTemplate.fromTemplate("{text}"),
* ]);
*
* // Create a new LLMChain with the chat model and the defined prompt
* const chainB = new LLMChain({
* prompt: chatPrompt,
* llm: new ChatMinimax({ temperature: 0.01 }),
* });
*
* // Call the chain with the input language, output language, and the text to translate
* const resB = await chainB.call({
* input_language: "English",
* output_language: "Chinese",
* text: "I love programming.",
* });
*
* // Log the result
* console.log({ resB });
* ```
*/
export class ChatMinimax
extends BaseChatModel<ChatMinimaxCallOptions>
24 changes: 24 additions & 0 deletions langchain/src/chat_models/ollama.ts
@@ -20,6 +20,30 @@ export interface OllamaCallOptions extends BaseLanguageModelCallOptions {}
* A class that enables calls to the Ollama API to access large language
* models in a chat-like fashion. It extends the SimpleChatModel class and
* implements the OllamaInput interface.
* @example
* ```typescript
* const prompt = ChatPromptTemplate.fromMessages([
* [
* "system",
* `You are an expert translator. Format all responses as JSON objects with two keys: "original" and "translated".`,
* ],
* ["human", `Translate "{input}" into {language}.`],
* ]);
*
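 * // baseUrl should point at a running Ollama server (the default is http://localhost:11434).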
* const model = new ChatOllama({
* baseUrl: "http://api.example.com",
* model: "llama2",
* format: "json",
* });
*
* const chain = prompt.pipe(model);
*
* const result = await chain.invoke({
* input: "I love programming",
* language: "German",
* });
 * console.log(result);
* ```
*/
export class ChatOllama
extends SimpleChatModel<OllamaCallOptions>
15 changes: 15 additions & 0 deletions langchain/src/chat_models/openai.ts
@@ -193,6 +193,21 @@ export interface ChatOpenAICallOptions
* https://platform.openai.com/docs/api-reference/chat/create |
* `openai.createChatCompletion`} can be passed through {@link modelKwargs}, even
* if not explicitly available on this class.
* @example
* ```typescript
* // Create a new instance of ChatOpenAI with specific temperature and model name settings
* const model = new ChatOpenAI({
* temperature: 0.9,
* modelName: "ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}",
* });
*
* // Invoke the model with a message and await the response
* const message = await model.invoke("Hi there!");
*
* // Log the response to the console
* console.log(message);
* ```
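 *
 * The `modelKwargs` passthrough mentioned above might be used as in the
 * following sketch; `user` is a standard `createChatCompletion` parameter,
 * shown here purely as an illustration.
 * ```typescript
 * // Forward an extra createChatCompletion parameter that is not
 * // surfaced directly on this class.
 * const modelWithKwargs = new ChatOpenAI({
 *   modelKwargs: { user: "example-user-id" },
 * });
 * ```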
*/
export class ChatOpenAI<
CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions
37 changes: 37 additions & 0 deletions langchain/src/embeddings/cache_backed.ts
@@ -23,6 +23,43 @@ export interface CacheBackedEmbeddingsFields extends AsyncCallerParams {
*
* If need be, the interface can be extended to accept other implementations
* of the value serializer and deserializer, as well as the key encoder.
* @example
* ```typescript
* const underlyingEmbeddings = new OpenAIEmbeddings();
*
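 * // Namespacing by embedding model name keeps cache entries from colliding across models.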
* const cacheBackedEmbeddings = CacheBackedEmbeddings.fromBytesStore(
* underlyingEmbeddings,
* new ConvexKVStore({ ctx }),
* {
* namespace: underlyingEmbeddings.modelName,
* },
* );
*
* const loader = new TextLoader("./state_of_the_union.txt");
* const rawDocuments = await loader.load();
* const splitter = new RecursiveCharacterTextSplitter({
* chunkSize: 1000,
* chunkOverlap: 0,
* });
* const documents = await splitter.splitDocuments(rawDocuments);
*
* let time = Date.now();
* const vectorstore = await ConvexVectorStore.fromDocuments(
* documents,
* cacheBackedEmbeddings,
* { ctx },
* );
* console.log(`Initial creation time: ${Date.now() - time}ms`);
*
* time = Date.now();
* const vectorstore2 = await ConvexVectorStore.fromDocuments(
* documents,
* cacheBackedEmbeddings,
* { ctx },
* );
* console.log(`Cached creation time: ${Date.now() - time}ms`);
* ```
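 *
 * For local experimentation, the same pattern works with LangChain's
 * in-memory store in place of the Convex-backed one (a minimal sketch):
 * ```typescript
 * const inMemoryCachedEmbeddings = CacheBackedEmbeddings.fromBytesStore(
 *   new OpenAIEmbeddings(),
 *   new InMemoryStore(),
 *   { namespace: "text-embedding-ada-002" },
 * );
 * ```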
*/
export class CacheBackedEmbeddings extends Embeddings {
protected underlyingEmbeddings: Embeddings;
10 changes: 10 additions & 0 deletions langchain/src/embeddings/cohere.ts
@@ -18,6 +18,16 @@ export interface CohereEmbeddingsParams extends EmbeddingsParams {

/**
* A class for generating embeddings using the Cohere API.
* @example
* ```typescript
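 * // Assumes the COHERE_API_KEY environment variable is set.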
* // Embed a query using the CohereEmbeddings class
 * const model = new CohereEmbeddings();
* const res = await model.embedQuery(
* "What would be a good company name for a company that makes colorful socks?",
* );
* console.log({ res });
* ```
*/
export class CohereEmbeddings
extends Embeddings
16 changes: 16 additions & 0 deletions langchain/src/embeddings/llama_cpp.ts
@@ -14,6 +14,22 @@ export interface LlamaCppEmbeddingsParams
extends LlamaBaseCppInputs,
EmbeddingsParams {}

/**
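 * A class for generating embeddings using a locally running llama.cpp model
 * via `node-llama-cpp`.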
* @example
* ```typescript
* // Initialize LlamaCppEmbeddings with the path to the model file
* const embeddings = new LlamaCppEmbeddings({
* modelPath: "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin",
* });
*
* // Embed a query string using the Llama embeddings
 * const res = await embeddings.embedQuery("Hello Llama!");
*
* // Output the resulting embeddings
* console.log(res);
* ```
*/
export class LlamaCppEmbeddings extends Embeddings {
_model: LlamaModel;

10 changes: 10 additions & 0 deletions langchain/src/embeddings/openai.ts
@@ -40,6 +40,16 @@ export interface OpenAIEmbeddingsParams extends EmbeddingsParams {
* Class for generating embeddings using the OpenAI API. Extends the
* Embeddings class and implements OpenAIEmbeddingsParams and
* AzureOpenAIInput.
* @example
* ```typescript
* // Embed a query using OpenAIEmbeddings to generate embeddings for a given text
* const model = new OpenAIEmbeddings();
* const res = await model.embedQuery(
* "What would be a good company name for a company that makes colorful socks?",
* );
* console.log({ res });
* ```
*/
export class OpenAIEmbeddings
extends Embeddings