diff --git a/packages/components/nodes/chains/ApiChain/ApiChain.ts b/packages/components/nodes/chains/ApiChain/ApiChain.ts
deleted file mode 100644
index bf810340d00..00000000000
--- a/packages/components/nodes/chains/ApiChain/ApiChain.ts
+++ /dev/null
@@ -1,88 +0,0 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { APIChain } from 'langchain/chains'
-import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
-import { BaseLanguageModel } from 'langchain/base_language'
-import { Document } from 'langchain/document'
-import { PromptTemplate } from 'langchain/prompts'
-
-class ApiChain_Chains implements INode {
-    label: string
-    name: string
-    type: string
-    icon: string
-    category: string
-    baseClasses: string[]
-    description: string
-    inputs: INodeParams[]
-
-    constructor() {
-        this.label = 'API Chain'
-        this.name = 'apiChain'
-        this.type = 'ApiChain'
-        this.icon = 'apichain.svg'
-        this.category = 'Chains'
-        this.description = 'Chain to run queries against API'
-        this.baseClasses = [this.type, ...getBaseClasses(APIChain)]
-        this.inputs = [
-            {
-                label: 'Language Model',
-                name: 'model',
-                type: 'BaseLanguageModel'
-            },
-            {
-                label: 'Document',
-                name: 'document',
-                type: 'Document'
-            },
-            {
-                label: 'Headers',
-                name: 'headers',
-                type: 'json',
-                additionalParams: true,
-                optional: true
-            }
-        ]
-    }
-
-    async init(nodeData: INodeData): Promise<any> {
-        const model = nodeData.inputs?.model as BaseLanguageModel
-        const docs = nodeData.inputs?.document as Document[]
-        const headers = nodeData.inputs?.headers as string
-
-        const chain = await getAPIChain(docs, model, headers)
-        return chain
-    }
-
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
-        const model = nodeData.inputs?.model as BaseLanguageModel
-        const docs = nodeData.inputs?.document as Document[]
-        const headers = nodeData.inputs?.headers as string
-
-        const chain = await getAPIChain(docs, model, headers)
-        if (options.socketIO && options.socketIOClientId) {
-            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
-            const res = await chain.run(input, [handler])
-            return res
-        } else {
-            const res = await chain.run(input)
-            return res
-        }
-    }
-}
-
-const getAPIChain = async (documents: Document[], llm: BaseLanguageModel, headers: any) => {
-    const texts = documents.map(({ pageContent }) => pageContent)
-    const apiResponsePrompt = new PromptTemplate({
-        inputVariables: ['api_docs', 'question', 'api_url', 'api_response'],
-        template: 'Given this {api_response} response for {api_url}. use the given response to answer this {question}'
-    })
-
-    const chain = APIChain.fromLLMAndAPIDocs(llm, texts.toString(), {
-        apiResponsePrompt,
-        verbose: process.env.DEBUG === 'true' ? true : false,
-        headers: typeof headers === 'object' ? headers : headers ? JSON.parse(headers) : {}
-    })
-    return chain
-}
-
-module.exports = { nodeClass: ApiChain_Chains }
diff --git a/packages/components/nodes/chains/ApiChain/GETApiChain.ts b/packages/components/nodes/chains/ApiChain/GETApiChain.ts
new file mode 100644
index 00000000000..8e6577494bb
--- /dev/null
+++ b/packages/components/nodes/chains/ApiChain/GETApiChain.ts
@@ -0,0 +1,129 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { APIChain } from 'langchain/chains'
+import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
+import { BaseLanguageModel } from 'langchain/base_language'
+import { PromptTemplate } from 'langchain/prompts'
+
+export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
+{api_docs}
+Using this documentation, generate the full API url to call for answering the user question.
+You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.
+
+Question:{question}
+API url:`
+
+export const API_RESPONSE_RAW_PROMPT_TEMPLATE =
+    'Given this {api_response} response for {api_url}. use the given response to answer this {question}'
+
+class GETApiChain_Chains implements INode {
+    label: string
+    name: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'GET API Chain'
+        this.name = 'getApiChain'
+        this.type = 'GETApiChain'
+        this.icon = 'apichain.svg'
+        this.category = 'Chains'
+        this.description = 'Chain to run queries against a GET API'
+        this.baseClasses = [this.type, ...getBaseClasses(APIChain)]
+        this.inputs = [
+            {
+                label: 'Language Model',
+                name: 'model',
+                type: 'BaseLanguageModel'
+            },
+            {
+                label: 'API Documentation',
+                name: 'apiDocs',
+                type: 'string',
+                description: 'Description of how the API works. Please refer to more examples',
+                rows: 4
+            },
+            {
+                label: 'Headers',
+                name: 'headers',
+                type: 'json',
+                additionalParams: true,
+                optional: true
+            },
+            {
+                label: 'URL Prompt',
+                name: 'urlPrompt',
+                type: 'string',
+                description: 'Prompt used to tell LLMs how to construct the URL. Must contain {api_docs} and {question}',
+                default: API_URL_RAW_PROMPT_TEMPLATE,
+                rows: 4,
+                additionalParams: true
+            },
+            {
+                label: 'Answer Prompt',
+                name: 'ansPrompt',
+                type: 'string',
+                description: 'Prompt used to tell LLMs how to return the API response. Must contain {api_response}, {api_url}, and {question}',
+                default: API_RESPONSE_RAW_PROMPT_TEMPLATE,
+                rows: 4,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        const apiDocs = nodeData.inputs?.apiDocs as string
+        const headers = nodeData.inputs?.headers as string
+        const urlPrompt = nodeData.inputs?.urlPrompt as string
+        const ansPrompt = nodeData.inputs?.ansPrompt as string
+
+        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
+        return chain
+    }
+
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        const apiDocs = nodeData.inputs?.apiDocs as string
+        const headers = nodeData.inputs?.headers as string
+        const urlPrompt = nodeData.inputs?.urlPrompt as string
+        const ansPrompt = nodeData.inputs?.ansPrompt as string
+
+        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
+        if (options.socketIO && options.socketIOClientId) {
+            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId, 2)
+            const res = await chain.run(input, [handler])
+            return res
+        } else {
+            const res = await chain.run(input)
+            return res
+        }
+    }
+}
+
+const getAPIChain = async (documents: string, llm: BaseLanguageModel, headers: string, urlPrompt: string, ansPrompt: string) => {
+    const apiUrlPrompt = new PromptTemplate({
+        inputVariables: ['api_docs', 'question'],
+        template: urlPrompt ? urlPrompt : API_URL_RAW_PROMPT_TEMPLATE
+    })
+
+    const apiResponsePrompt = new PromptTemplate({
+        inputVariables: ['api_docs', 'question', 'api_url', 'api_response'],
+        template: ansPrompt ? ansPrompt : API_RESPONSE_RAW_PROMPT_TEMPLATE
+    })
+
+    const chain = APIChain.fromLLMAndAPIDocs(llm, documents, {
+        apiUrlPrompt,
+        apiResponsePrompt,
+        verbose: process.env.DEBUG === 'true' ? true : false,
+        headers: typeof headers === 'object' ? headers : headers ? JSON.parse(headers) : {}
+    })
+    return chain
+}
+
+module.exports = { nodeClass: GETApiChain_Chains }
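[Editor's note] The GET node above is a thin wrapper over LangChain's stock APIChain: init builds the two prompts and hands everything to APIChain.fromLLMAndAPIDocs. A minimal standalone sketch of the same wiring, assuming OPENAI_API_KEY is set in the environment and abbreviating the documentation string (the marketplace flow below ships the full open-meteo docs):

    import { APIChain } from 'langchain/chains'
    import { ChatOpenAI } from 'langchain/chat_models/openai'

    // Abbreviated API docs; real flows paste the full endpoint documentation here.
    const apiDocs = `BASE URL: https://api.open-meteo.com/
    The /v1/forecast endpoint accepts latitude, longitude and a list of hourly
    weather variables, and returns a JSON forecast.`

    const main = async () => {
        const llm = new ChatOpenAI({ temperature: 0 })
        // With no prompt overrides, LangChain's built-in URL and answer prompts apply;
        // the node's default URL prompt above closely mirrors the built-in one.
        const chain = APIChain.fromLLMAndAPIDocs(llm, apiDocs, { headers: {} })
        const res = await chain.run('What is the current temperature in Singapore?')
        console.log(res)
    }

    main()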
diff --git a/packages/components/nodes/chains/ApiChain/POSTApiChain.ts b/packages/components/nodes/chains/ApiChain/POSTApiChain.ts
new file mode 100644
index 00000000000..3c6ea677297
--- /dev/null
+++ b/packages/components/nodes/chains/ApiChain/POSTApiChain.ts
@@ -0,0 +1,118 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
+import { BaseLanguageModel } from 'langchain/base_language'
+import { PromptTemplate } from 'langchain/prompts'
+import { API_RESPONSE_RAW_PROMPT_TEMPLATE, API_URL_RAW_PROMPT_TEMPLATE, APIChain } from './postCore'
+
+class POSTApiChain_Chains implements INode {
+    label: string
+    name: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'POST API Chain'
+        this.name = 'postApiChain'
+        this.type = 'POSTApiChain'
+        this.icon = 'apichain.svg'
+        this.category = 'Chains'
+        this.description = 'Chain to run queries against a POST API'
+        this.baseClasses = [this.type, ...getBaseClasses(APIChain)]
+        this.inputs = [
+            {
+                label: 'Language Model',
+                name: 'model',
+                type: 'BaseLanguageModel'
+            },
+            {
+                label: 'API Documentation',
+                name: 'apiDocs',
+                type: 'string',
+                description: 'Description of how the API works. Please refer to more examples',
+                rows: 4
+            },
+            {
+                label: 'Headers',
+                name: 'headers',
+                type: 'json',
+                additionalParams: true,
+                optional: true
+            },
+            {
+                label: 'URL Prompt',
+                name: 'urlPrompt',
+                type: 'string',
+                description: 'Prompt used to tell LLMs how to construct the URL. Must contain {api_docs} and {question}',
+                default: API_URL_RAW_PROMPT_TEMPLATE,
+                rows: 4,
+                additionalParams: true
+            },
+            {
+                label: 'Answer Prompt',
+                name: 'ansPrompt',
+                type: 'string',
+                description: 'Prompt used to tell LLMs how to return the API response. Must contain {api_response}, {api_url}, and {question}',
+                default: API_RESPONSE_RAW_PROMPT_TEMPLATE,
+                rows: 4,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData): Promise<any> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        const apiDocs = nodeData.inputs?.apiDocs as string
+        const headers = nodeData.inputs?.headers as string
+        const urlPrompt = nodeData.inputs?.urlPrompt as string
+        const ansPrompt = nodeData.inputs?.ansPrompt as string
+
+        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
+        return chain
+    }
+
+    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
+        const model = nodeData.inputs?.model as BaseLanguageModel
+        const apiDocs = nodeData.inputs?.apiDocs as string
+        const headers = nodeData.inputs?.headers as string
+        const urlPrompt = nodeData.inputs?.urlPrompt as string
+        const ansPrompt = nodeData.inputs?.ansPrompt as string
+
+        const chain = await getAPIChain(apiDocs, model, headers, urlPrompt, ansPrompt)
+        if (options.socketIO && options.socketIOClientId) {
+            const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId, 2)
+            const res = await chain.run(input, [handler])
+            return res
+        } else {
+            const res = await chain.run(input)
+            return res
+        }
+    }
+}
+
+const getAPIChain = async (documents: string, llm: BaseLanguageModel, headers: string, urlPrompt: string, ansPrompt: string) => {
+    const apiUrlPrompt = new PromptTemplate({
+        inputVariables: ['api_docs', 'question'],
+        template: urlPrompt ? urlPrompt : API_URL_RAW_PROMPT_TEMPLATE
+    })
+
+    const apiResponsePrompt = new PromptTemplate({
+        inputVariables: ['api_docs', 'question', 'api_url_body', 'api_response'],
+        template: ansPrompt ? ansPrompt : API_RESPONSE_RAW_PROMPT_TEMPLATE
+    })
+
+    const chain = APIChain.fromLLMAndAPIDocs(llm, documents, {
+        apiUrlPrompt,
+        apiResponsePrompt,
+        verbose: process.env.DEBUG === 'true' ? true : false,
+        headers: typeof headers === 'object' ? headers : headers ? JSON.parse(headers) : {}
+    })
+    return chain
+}
+
+module.exports = { nodeClass: POSTApiChain_Chains }
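[Editor's note] Both nodes accept the Headers input either as an already-parsed object (when wired from another node) or as the raw JSON string the UI stores, which is what the typeof ternary in each getAPIChain normalizes. A hypothetical helper, not part of the diff, restating that inline logic with an explicit null guard:

    // Same coercion the nodes apply inline: object passes through, a non-empty
    // JSON string is parsed, anything else becomes an empty header set.
    const coerceHeaders = (headers: unknown): Record<string, string> => {
        if (headers && typeof headers === 'object') return headers as Record<string, string>
        if (typeof headers === 'string' && headers) return JSON.parse(headers)
        return {}
    }

    coerceHeaders('{ "Authorization": "Bearer abc123" }') // => { Authorization: 'Bearer abc123' }
    coerceHeaders('')                                     // => {}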
diff --git a/packages/components/nodes/chains/ApiChain/postCore.ts b/packages/components/nodes/chains/ApiChain/postCore.ts
new file mode 100644
index 00000000000..de7215d9248
--- /dev/null
+++ b/packages/components/nodes/chains/ApiChain/postCore.ts
@@ -0,0 +1,162 @@
+import { BaseLanguageModel } from 'langchain/base_language'
+import { CallbackManagerForChainRun } from 'langchain/callbacks'
+import { BaseChain, ChainInputs, LLMChain, SerializedAPIChain } from 'langchain/chains'
+import { BasePromptTemplate, PromptTemplate } from 'langchain/prompts'
+import { ChainValues } from 'langchain/schema'
+import fetch from 'node-fetch'
+
+export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
+{api_docs}
+Using this documentation, generate a json string with two keys: "url" and "data".
+The value of "url" should be a string, which is the API url to call for answering the user question.
+The value of "data" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.
+Be careful to always use double quotes for strings in the json string.
+You should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.
+
+Question:{question}
+json string:`
+
+export const API_RESPONSE_RAW_PROMPT_TEMPLATE = `${API_URL_RAW_PROMPT_TEMPLATE} {api_url_body}
+
+Here is the response from the API:
+
+{api_response}
+
+Summarize this response to answer the original question.
+
+Summary:`
+
+const defaultApiUrlPrompt = new PromptTemplate({
+    inputVariables: ['api_docs', 'question'],
+    template: API_URL_RAW_PROMPT_TEMPLATE
+})
+
+const defaultApiResponsePrompt = new PromptTemplate({
+    inputVariables: ['api_docs', 'question', 'api_url_body', 'api_response'],
+    template: API_RESPONSE_RAW_PROMPT_TEMPLATE
+})
+
+export interface APIChainInput extends Omit<ChainInputs, 'memory'> {
+    apiAnswerChain: LLMChain
+    apiRequestChain: LLMChain
+    apiDocs: string
+    inputKey?: string
+    headers?: Record<string, string>
+    /** Key to use for output, defaults to `output` */
+    outputKey?: string
+}
+
+export type APIChainOptions = {
+    headers?: Record<string, string>
+    apiUrlPrompt?: BasePromptTemplate
+    apiResponsePrompt?: BasePromptTemplate
+}
+
+export class APIChain extends BaseChain implements APIChainInput {
+    apiAnswerChain: LLMChain
+
+    apiRequestChain: LLMChain
+
+    apiDocs: string
+
+    headers = {}
+
+    inputKey = 'question'
+
+    outputKey = 'output'
+
+    get inputKeys() {
+        return [this.inputKey]
+    }
+
+    get outputKeys() {
+        return [this.outputKey]
+    }
+
+    constructor(fields: APIChainInput) {
+        super(fields)
+        this.apiRequestChain = fields.apiRequestChain
+        this.apiAnswerChain = fields.apiAnswerChain
+        this.apiDocs = fields.apiDocs
+        this.inputKey = fields.inputKey ?? this.inputKey
+        this.outputKey = fields.outputKey ?? this.outputKey
+        this.headers = fields.headers ?? this.headers
+    }
+
+    /** @ignore */
+    async _call(values: ChainValues, runManager?: CallbackManagerForChainRun): Promise<ChainValues> {
+        try {
+            const question: string = values[this.inputKey]
+
+            // Ask the request chain for a JSON string holding the target "url" and POST "data"
+            const api_url_body = await this.apiRequestChain.predict({ question, api_docs: this.apiDocs }, runManager?.getChild())
+
+            const { url, data } = JSON.parse(api_url_body)
+
+            const res = await fetch(url, {
+                method: 'POST',
+                headers: this.headers,
+                body: JSON.stringify(data)
+            })
+
+            const api_response = await res.text()
+
+            // Let the answer chain summarize the raw API response into a final answer
+            const answer = await this.apiAnswerChain.predict(
+                { question, api_docs: this.apiDocs, api_url_body, api_response },
+                runManager?.getChild()
+            )
+
+            return { [this.outputKey]: answer }
+        } catch (error) {
+            // Surface the failure as the chain output instead of throwing
+            return { [this.outputKey]: error }
+        }
+    }
+
+    _chainType() {
+        return 'api_chain' as const
+    }
+
+    static async deserialize(data: SerializedAPIChain) {
+        const { api_request_chain, api_answer_chain, api_docs } = data
+
+        if (!api_request_chain) {
+            throw new Error('LLMChain must have api_request_chain')
+        }
+        if (!api_answer_chain) {
+            throw new Error('LLMChain must have api_answer_chain')
+        }
+        if (!api_docs) {
+            throw new Error('LLMChain must have api_docs')
+        }
+
+        return new APIChain({
+            apiAnswerChain: await LLMChain.deserialize(api_answer_chain),
+            apiRequestChain: await LLMChain.deserialize(api_request_chain),
+            apiDocs: api_docs
+        })
+    }
+
+    serialize(): SerializedAPIChain {
+        return {
+            _type: this._chainType(),
+            api_answer_chain: this.apiAnswerChain.serialize(),
+            api_request_chain: this.apiRequestChain.serialize(),
+            api_docs: this.apiDocs
+        }
+    }
+
+    static fromLLMAndAPIDocs(
+        llm: BaseLanguageModel,
+        apiDocs: string,
+        options: APIChainOptions & Omit<APIChainInput, 'apiRequestChain' | 'apiAnswerChain' | 'apiDocs'> = {}
+    ): APIChain {
+        const { apiUrlPrompt = defaultApiUrlPrompt, apiResponsePrompt = defaultApiResponsePrompt } = options
+        const apiRequestChain = new LLMChain({ prompt: apiUrlPrompt, llm })
+        const apiAnswerChain = new LLMChain({ prompt: apiResponsePrompt, llm })
+        return new this({
+            apiAnswerChain,
+            apiRequestChain,
+            apiDocs,
+            ...options
+        })
+    }
+}
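[Editor's note] The contract between the two inner chains is easiest to see with a concrete value. A hand-traced sketch of what _call does with a hypothetical request-chain output for the Discord flow below (the pipedream endpoint comes from the marketplace template; the message value is made up):

    import fetch from 'node-fetch'

    // Hypothetical output of apiRequestChain.predict() for "Send hi to Discord":
    const api_url_body = '{"url": "https://eog776prcv6dg0j.m.pipedream.net", "data": {"message": "hi"}}'

    // _call parses the JSON string, POSTs the body, and reads the response as text.
    const { url, data } = JSON.parse(api_url_body)
    const res = await fetch(url, { method: 'POST', body: JSON.stringify(data) })
    const api_response = await res.text()
    console.log(api_response)

Note that _call sends only the configured headers with the POST, so APIs that insist on Content-Type: application/json need it supplied through the node's Headers input.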
diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts
index 68c098fddb7..08d32bab14e 100644
--- a/packages/components/src/utils.ts
+++ b/packages/components/src/utils.ts
@@ -207,19 +207,27 @@ export class CustomChainHandler extends BaseCallbackHandler {
     isLLMStarted = false
     socketIO: Server
     socketIOClientId = ''
+    skipK = 0 // Suppress token streaming until skipK handleLLMStart events have fired
 
-    constructor(socketIO: Server, socketIOClientId: string) {
+    constructor(socketIO: Server, socketIOClientId: string, skipK?: number) {
         super()
         this.socketIO = socketIO
         this.socketIOClientId = socketIOClientId
+        this.skipK = skipK ?? this.skipK
+    }
+
+    handleLLMStart() {
+        if (this.skipK > 0) this.skipK -= 1
     }
 
     handleLLMNewToken(token: string) {
-        if (!this.isLLMStarted) {
-            this.isLLMStarted = true
-            this.socketIO.to(this.socketIOClientId).emit('start', token)
+        if (this.skipK === 0) {
+            if (!this.isLLMStarted) {
+                this.isLLMStarted = true
+                this.socketIO.to(this.socketIOClientId).emit('start', token)
+            }
+            this.socketIO.to(this.socketIOClientId).emit('token', token)
         }
-        this.socketIO.to(this.socketIOClientId).emit('token', token)
     }
 
     handleLLMEnd() {
diff --git a/packages/server/marketplaces/API Agent.json b/packages/server/marketplaces/API Agent.json
new file mode 100644
index 00000000000..a1a42ddb56e
--- /dev/null
+++ b/packages/server/marketplaces/API Agent.json
@@ -0,0 +1,949 @@
+{
+    "description": "Given API docs, the agent automatically decides which API to call, generating the URL and request body from the conversation",
+    "nodes": [
+        {
+            "width": 300,
+            "height": 459,
+            "id": "getApiChain_0",
+            "position": {
+                "x": 1222.6923202234623,
+                "y": 359.97676456347756
+            },
+            "type": "customNode",
+            "data": {
+                "id": "getApiChain_0",
+                "label": "GET API Chain",
+                "name": "getApiChain",
+                "type": "GETApiChain",
+                "baseClasses": ["GETApiChain", "BaseChain", "BaseLangChain"],
+                "category": "Chains",
+                "description": "Chain to run queries against a GET API",
+                "inputParams": [
+                    {
+                        "label": "API Documentation",
+                        "name": "apiDocs",
+                        "type": "string",
+                        "description": "Description of how the API works. Please refer to more examples",
+                        "rows": 4,
+                        "id": "getApiChain_0-input-apiDocs-string"
+                    },
+                    {
+                        "label": "Headers",
+                        "name": "headers",
+                        "type": "json",
+                        "additionalParams": true,
+                        "optional": true,
+                        "id": "getApiChain_0-input-headers-json"
+                    },
+                    {
+                        "label": "URL Prompt",
+                        "name": "urlPrompt",
+                        "type": "string",
+                        "description": "Prompt used to tell LLMs how to construct the URL. Must contain {api_docs} and {question}",
+                        "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
+                        "rows": 4,
+                        "additionalParams": true,
+                        "id": "getApiChain_0-input-urlPrompt-string"
+                    },
+                    {
+                        "label": "Answer Prompt",
+                        "name": "ansPrompt",
+                        "type": "string",
+                        "description": "Prompt used to tell LLMs how to return the API response. Must contain {api_response}, {api_url}, and {question}",
+                        "default": "Given this {api_response} response for {api_url}. use the given response to answer this {question}",
+                        "rows": 4,
+                        "additionalParams": true,
+                        "id": "getApiChain_0-input-ansPrompt-string"
+                    }
+                ],
+                "inputAnchors": [
+                    {
+                        "label": "Language Model",
+                        "name": "model",
+                        "type": "BaseLanguageModel",
+                        "id": "getApiChain_0-input-model-BaseLanguageModel"
+                    }
+                ],
+                "inputs": {
+                    "model": "{{chatOpenAI_1.data.instance}}",
+                    "apiDocs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. 
All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t°C (°F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0°C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.", + "headers": "", + "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:", + "ansPrompt": "Given this {api_response} response for {api_url}. use the given response to answer this {question}" + }, + "outputAnchors": [ + { + "id": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain", + "name": "getApiChain", + "label": "GETApiChain", + "type": "GETApiChain | BaseChain | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1222.6923202234623, + "y": 359.97676456347756 + }, + "dragging": false + }, + { + "width": 300, + "height": 383, + "id": "conversationalAgent_0", + "position": { + "x": 1993.8540808923876, + "y": 952.6297081192247 + }, + "type": "customNode", + "data": { + "id": "conversationalAgent_0", + "label": "Conversational Agent", + "name": "conversationalAgent", + "type": "AgentExecutor", + "baseClasses": ["AgentExecutor", "BaseChain", "BaseLangChain"], + "category": "Agents", + "description": "Conversational agent for a chat model. It will utilize chat specific prompts", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessage", + "type": "string", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "conversationalAgent_0-input-systemMessage-string" + }, + { + "label": "Human Message", + "name": "humanMessage", + "type": "string", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "conversationalAgent_0-input-humanMessage-string" + } + ], + "inputAnchors": [ + { + "label": "Allowed Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "conversationalAgent_0-input-tools-Tool" + }, + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "conversationalAgent_0-input-model-BaseLanguageModel" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "conversationalAgent_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"], + "model": "{{chatOpenAI_0.data.instance}}", + "memory": "{{bufferMemory_0.data.instance}}", + "systemMessage": "", + "humanMessage": "" + }, + "outputAnchors": [ + { + "id": "conversationalAgent_0-output-conversationalAgent-AgentExecutor|BaseChain|BaseLangChain", + "name": "conversationalAgent", + "label": "AgentExecutor", + "type": "AgentExecutor | BaseChain | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1993.8540808923876, + "y": 952.6297081192247 + }, + "dragging": false + }, + { + "width": 300, + "height": 602, + "id": "chainTool_0", + "position": { + "x": 1600.1485877701232, + "y": 276.38970893436533 + }, + "type": "customNode", + "data": { + "id": "chainTool_0", + "label": "Chain Tool", + "name": "chainTool", + "type": "ChainTool", + "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"], + "category": "Tools", + "description": "Use a chain as allowed tool for agent", + "inputParams": [ + { + "label": "Chain Name", + "name": "name", + "type": "string", + "placeholder": "state-of-union-qa", + "id": "chainTool_0-input-name-string" + }, + { + "label": "Chain Description", + "name": "description", + "type": "string", + "rows": 3, + "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", + "id": "chainTool_0-input-description-string" + }, + { + 
"label": "Return Direct", + "name": "returnDirect", + "type": "boolean", + "optional": true, + "id": "chainTool_0-input-returnDirect-boolean" + } + ], + "inputAnchors": [ + { + "label": "Base Chain", + "name": "baseChain", + "type": "BaseChain", + "id": "chainTool_0-input-baseChain-BaseChain" + } + ], + "inputs": { + "name": "weather-qa", + "description": "useful for when you need to ask question about weather", + "returnDirect": "", + "baseChain": "{{getApiChain_0.data.instance}}" + }, + "outputAnchors": [ + { + "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", + "name": "chainTool", + "label": "ChainTool", + "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1600.1485877701232, + "y": 276.38970893436533 + }, + "dragging": false + }, + { + "width": 300, + "height": 524, + "id": "chatOpenAI_0", + "position": { + "x": 1270.7548070814019, + "y": 1565.864417576483 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "BaseLangChain"], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "OpenAI Api Key", + "name": "openAIApiKey", + "type": "password", + "id": "chatOpenAI_0-input-openAIApiKey-password" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-0314", + "name": "gpt-4-0314" + }, + { + "label": "gpt-4-32k-0314", + "name": "gpt-4-32k-0314" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0301", + "name": "gpt-3.5-turbo-0301" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "default": 0.9, + "optional": true, + "id": "chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | BaseLangChain" + } + ], + 
"outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1270.7548070814019, + "y": 1565.864417576483 + }, + "dragging": false + }, + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 1642.0644080121785, + "y": 1715.6131926891728 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1642.0644080121785, + "y": 1715.6131926891728 + }, + "dragging": false + }, + { + "width": 300, + "height": 524, + "id": "chatOpenAI_1", + "position": { + "x": 865.4424095725009, + "y": 350.7505181391267 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_1", + "label": "ChatOpenAI", + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "BaseLangChain"], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "OpenAI Api Key", + "name": "openAIApiKey", + "type": "password", + "id": "chatOpenAI_1-input-openAIApiKey-password" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-0314", + "name": "gpt-4-0314" + }, + { + "label": "gpt-4-32k-0314", + "name": "gpt-4-32k-0314" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0301", + "name": "gpt-3.5-turbo-0301" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "default": 0.9, + "optional": true, + "id": "chatOpenAI_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": 
true, + "id": "chatOpenAI_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 865.4424095725009, + "y": 350.7505181391267 + }, + "dragging": false + }, + { + "width": 300, + "height": 524, + "id": "chatOpenAI_2", + "position": { + "x": 587.6425146349426, + "y": 917.1494176892741 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_2", + "label": "ChatOpenAI", + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "BaseLangChain"], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "OpenAI Api Key", + "name": "openAIApiKey", + "type": "password", + "id": "chatOpenAI_2-input-openAIApiKey-password" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-0314", + "name": "gpt-4-0314" + }, + { + "label": "gpt-4-32k-0314", + "name": "gpt-4-32k-0314" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0301", + "name": "gpt-3.5-turbo-0301" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "default": 0.9, + "optional": true, + "id": "chatOpenAI_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 587.6425146349426, + "y": 917.1494176892741 + }, + "dragging": false + }, + { + "width": 300, + "height": 602, + "id": "chainTool_1", + "position": 
{ + "x": 1284.7746596034926, + "y": 895.1444797047182 + }, + "type": "customNode", + "data": { + "id": "chainTool_1", + "label": "Chain Tool", + "name": "chainTool", + "type": "ChainTool", + "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"], + "category": "Tools", + "description": "Use a chain as allowed tool for agent", + "inputParams": [ + { + "label": "Chain Name", + "name": "name", + "type": "string", + "placeholder": "state-of-union-qa", + "id": "chainTool_1-input-name-string" + }, + { + "label": "Chain Description", + "name": "description", + "type": "string", + "rows": 3, + "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", + "id": "chainTool_1-input-description-string" + }, + { + "label": "Return Direct", + "name": "returnDirect", + "type": "boolean", + "optional": true, + "id": "chainTool_1-input-returnDirect-boolean" + } + ], + "inputAnchors": [ + { + "label": "Base Chain", + "name": "baseChain", + "type": "BaseChain", + "id": "chainTool_1-input-baseChain-BaseChain" + } + ], + "inputs": { + "name": "discord-bot", + "description": "useful for when you need to send message to Discord", + "returnDirect": "", + "baseChain": "{{postApiChain_0.data.instance}}" + }, + "outputAnchors": [ + { + "id": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", + "name": "chainTool", + "label": "ChainTool", + "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1284.7746596034926, + "y": 895.1444797047182 + }, + "dragging": false + }, + { + "width": 300, + "height": 459, + "id": "postApiChain_0", + "position": { + "x": 933.3631140153886, + "y": 974.8756002461283 + }, + "type": "customNode", + "data": { + "id": "postApiChain_0", + "label": "POST API Chain", + "name": "postApiChain", + "type": "POSTApiChain", + "baseClasses": ["POSTApiChain", "BaseChain", "BaseLangChain"], + "category": "Chains", + "description": "Chain to run queries against POST API", + "inputParams": [ + { + "label": "API Documentation", + "name": "apiDocs", + "type": "string", + "description": "Description of how API works. Please refer to more examples", + "rows": 4, + "id": "postApiChain_0-input-apiDocs-string" + }, + { + "label": "Headers", + "name": "headers", + "type": "json", + "additionalParams": true, + "optional": true, + "id": "postApiChain_0-input-headers-json" + }, + { + "label": "URL Prompt", + "name": "urlPrompt", + "type": "string", + "description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}", + "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:", + "rows": 4, + "additionalParams": true, + "id": "postApiChain_0-input-urlPrompt-string" + }, + { + "label": "Answer Prompt", + "name": "ansPrompt", + "type": "string", + "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}", + "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:", + "rows": 4, + "additionalParams": true, + "id": "postApiChain_0-input-ansPrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "postApiChain_0-input-model-BaseLanguageModel" + } + ], + "inputs": { + "model": "{{chatOpenAI_2.data.instance}}", + "apiDocs": "API documentation:\nEndpoint: https://eog776prcv6dg0j.m.pipedream.net\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string", + "headers": "", + "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:", + "ansPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:" + }, + "outputAnchors": [ + { + "id": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain", + "name": "postApiChain", + "label": "POSTApiChain", + "type": "POSTApiChain | BaseChain | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 933.3631140153886, + "y": 974.8756002461283 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "getApiChain_0", + "sourceHandle": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain", + "target": "chainTool_0", + "targetHandle": "chainTool_0-input-baseChain-BaseChain", + "type": "buttonedge", + "id": "getApiChain_0-getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain-chainTool_0-chainTool_0-input-baseChain-BaseChain", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain", + "target": "conversationalAgent_0", + "targetHandle": "conversationalAgent_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-model-BaseLanguageModel", + "data": { + "label": "" + } + }, + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "conversationalAgent_0", + "targetHandle": "conversationalAgent_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationalAgent_0-conversationalAgent_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_1", + "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain", + "target": "getApiChain_0", + "targetHandle": "getApiChain_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain-getApiChain_0-getApiChain_0-input-model-BaseLanguageModel", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_2", + "sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain", + "target": "postApiChain_0", + "targetHandle": "postApiChain_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain-postApiChain_0-postApiChain_0-input-model-BaseLanguageModel", + "data": { + "label": "" + } + }, + { + "source": "postApiChain_0", + "sourceHandle": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain", + "target": "chainTool_1", + "targetHandle": "chainTool_1-input-baseChain-BaseChain", + "type": "buttonedge", + "id": "postApiChain_0-postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain-chainTool_1-chainTool_1-input-baseChain-BaseChain", + "data": { + "label": "" + } + }, + { + "source": "chainTool_0", + "sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", + 
"target": "conversationalAgent_0", + "targetHandle": "conversationalAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool", + "data": { + "label": "" + } + }, + { + "source": "chainTool_1", + "sourceHandle": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", + "target": "conversationalAgent_0", + "targetHandle": "conversationalAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "chainTool_1-chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool", + "data": { + "label": "" + } + } + ] +}