diff --git a/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts b/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts new file mode 100644 index 00000000000..3252a61a768 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatHuggingFace/ChatHuggingFace.ts @@ -0,0 +1,103 @@ +import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses } from '../../../src/utils' +import { HFInput, HuggingFaceInference } from 'langchain/llms/hf' + +class ChatHuggingFace_ChatModels implements INode { + label: string + name: string + type: string + icon: string + category: string + description: string + baseClasses: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'ChatHuggingFace' + this.name = 'chatHuggingFace' + this.type = 'ChatHuggingFace' + this.icon = 'huggingface.png' + this.category = 'Chat Models' + this.description = 'Wrapper around HuggingFace large language models' + this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(HuggingFaceInference)] + this.inputs = [ + { + label: 'Model', + name: 'model', + type: 'string', + placeholder: 'gpt2' + }, + { + label: 'HuggingFace Api Key', + name: 'apiKey', + type: 'password' + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + description: 'Temperature parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + description: 'Max Tokens parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + description: 'Top Probability parameter may not apply to certain model. 
Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'hfTopK', + type: 'number', + description: 'Top K parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Frequency Penalty', + name: 'frequencyPenalty', + type: 'number', + description: 'Frequency Penalty parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData): Promise<any> { + const model = nodeData.inputs?.model as string + const apiKey = nodeData.inputs?.apiKey as string + const temperature = nodeData.inputs?.temperature as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const hfTopK = nodeData.inputs?.hfTopK as string + const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string + + const obj: Partial<HFInput> = { + model, + apiKey + } + + if (temperature) obj.temperature = parseFloat(temperature) + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (hfTopK) obj.topK = parseInt(hfTopK, 10) + if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty) + + const huggingFace = new HuggingFaceInference(obj) + return huggingFace + } +} + +module.exports = { nodeClass: ChatHuggingFace_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatHuggingFace/huggingface.png b/packages/components/nodes/chatmodels/ChatHuggingFace/huggingface.png new file mode 100644 index 00000000000..f8f202a4630 Binary files /dev/null and b/packages/components/nodes/chatmodels/ChatHuggingFace/huggingface.png differ diff --git a/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts b/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts index 6aa3f4f4ff5..88a7db07e12 100644 --- 
a/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts +++ b/packages/components/nodes/llms/HuggingFaceInference/HuggingFaceInference.ts @@ -1,6 +1,6 @@ import { INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' -import { HuggingFaceInference } from 'langchain/llms/hf' +import { HFInput, HuggingFaceInference } from 'langchain/llms/hf' class HuggingFaceInference_LLMs implements INode { label: string @@ -31,6 +31,46 @@ class HuggingFaceInference_LLMs implements INode { label: 'HuggingFace Api Key', name: 'apiKey', type: 'password' + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + description: 'Temperature parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + description: 'Max Tokens parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + description: 'Top Probability parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'hfTopK', + type: 'number', + description: 'Top K parameter may not apply to certain model. Please check available model parameters', + optional: true, + additionalParams: true + }, + { + label: 'Frequency Penalty', + name: 'frequencyPenalty', + type: 'number', + description: 'Frequency Penalty parameter may not apply to certain model. 
Please check available model parameters', + optional: true, + additionalParams: true + } + ] + } + @@ -38,11 +78,24 @@ class HuggingFaceInference_LLMs implements INode { async init(nodeData: INodeData): Promise<any> { const model = nodeData.inputs?.model as string const apiKey = nodeData.inputs?.apiKey as string + const temperature = nodeData.inputs?.temperature as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const hfTopK = nodeData.inputs?.hfTopK as string + const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string - const huggingFace = new HuggingFaceInference({ + const obj: Partial<HFInput> = { model, apiKey - }) + } + + if (temperature) obj.temperature = parseFloat(temperature) + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (hfTopK) obj.topK = parseInt(hfTopK, 10) + if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty) + + const huggingFace = new HuggingFaceInference(obj) return huggingFace } } diff --git a/packages/server/marketplaces/HuggingFace LLM Chain.json b/packages/server/marketplaces/HuggingFace LLM Chain.json new file mode 100644 index 00000000000..9d3492c6b95 --- /dev/null +++ b/packages/server/marketplaces/HuggingFace LLM Chain.json @@ -0,0 +1,274 @@ +{ + "description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model", + "nodes": [ + { + "width": 300, + "height": 532, + "id": "promptTemplate_1", + "position": { + "x": 514.5434056794296, + "y": 507.47798128037107 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_1", + "label": "Prompt Template", + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": ["PromptTemplate", "BaseStringPromptTemplate", "BasePromptTemplate"], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 
4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_1-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "string", + "rows": 4, + "placeholder": "{\n \"input_language\": \"English\",\n \"output_language\": \"French\"\n}", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_1-input-promptValues-string" + } + ], + "inputAnchors": [], + "inputs": { + "template": "Question: {question}\n\nAnswer: Let's think step by step.", + "promptValues": "" + }, + "outputAnchors": [ + { + "id": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 514.5434056794296, + "y": 507.47798128037107 + }, + "dragging": false + }, + { + "width": 300, + "height": 405, + "id": "llmChain_1", + "position": { + "x": 970.9254258940236, + "y": 320.56761595884564 + }, + "type": "customNode", + "data": { + "id": "llmChain_1", + "label": "LLM Chain", + "name": "llmChain", + "type": "LLMChain", + "baseClasses": ["LLMChain", "BaseChain", "BaseLangChain"], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_1-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_1-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_1-input-prompt-BasePromptTemplate" + } + ], + "inputs": { + "model": "{{huggingFaceInference_LLMs_0.data.instance}}", 
+ "prompt": "{{promptTemplate_1.data.instance}}", + "chainName": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_1-output-llmChain-LLMChain|BaseChain|BaseLangChain", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | BaseLangChain" + }, + { + "id": "llmChain_1-output-outputPrediction-string", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "llmChain" + }, + "selected": false + }, + "positionAbsolute": { + "x": 970.9254258940236, + "y": 320.56761595884564 + }, + "selected": false, + "dragging": false + }, + { + "width": 300, + "height": 427, + "id": "huggingFaceInference_LLMs_0", + "position": { + "x": 503.5630827259226, + "y": 50.79125094823999 + }, + "type": "customNode", + "data": { + "id": "huggingFaceInference_LLMs_0", + "label": "HuggingFace Inference", + "name": "huggingFaceInference_LLMs", + "type": "HuggingFaceInference", + "baseClasses": ["HuggingFaceInference", "LLM", "BaseLLM", "BaseLanguageModel", "BaseLangChain"], + "category": "LLMs", + "description": "Wrapper around HuggingFace large language models", + "inputParams": [ + { + "label": "Model", + "name": "model", + "type": "string", + "placeholder": "gpt2", + "id": "huggingFaceInference_LLMs_0-input-model-string" + }, + { + "label": "HuggingFace Api Key", + "name": "apiKey", + "type": "password", + "id": "huggingFaceInference_LLMs_0-input-apiKey-password" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "description": "Temperature parameter may not apply to certain model. 
Please check available model parameters", + "optional": true, + "additionalParams": true, + "id": "huggingFaceInference_LLMs_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "description": "Max Tokens parameter may not apply to certain model. Please check available model parameters", + "optional": true, + "additionalParams": true, + "id": "huggingFaceInference_LLMs_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "description": "Top Probability parameter may not apply to certain model. Please check available model parameters", + "optional": true, + "additionalParams": true, + "id": "huggingFaceInference_LLMs_0-input-topP-number" + }, + { + "label": "Top K", + "name": "hfTopK", + "type": "number", + "description": "Top K parameter may not apply to certain model. Please check available model parameters", + "optional": true, + "additionalParams": true, + "id": "huggingFaceInference_LLMs_0-input-hfTopK-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "description": "Frequency Penalty parameter may not apply to certain model. 
Please check available model parameters", + "optional": true, + "additionalParams": true, + "id": "huggingFaceInference_LLMs_0-input-frequencyPenalty-number" + } + ], + "inputAnchors": [], + "inputs": { + "model": "tiiuae/falcon-7b-instruct", + "temperature": "", + "maxTokens": "200", + "topP": "", + "hfTopK": "10", + "frequencyPenalty": "" + }, + "outputAnchors": [ + { + "id": "huggingFaceInference_LLMs_0-output-huggingFaceInference_LLMs-HuggingFaceInference|LLM|BaseLLM|BaseLanguageModel|BaseLangChain", + "name": "huggingFaceInference_LLMs", + "label": "HuggingFaceInference", + "type": "HuggingFaceInference | LLM | BaseLLM | BaseLanguageModel | BaseLangChain" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 503.5630827259226, + "y": 50.79125094823999 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "promptTemplate_1", + "sourceHandle": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate", + "target": "llmChain_1", + "targetHandle": "llmChain_1-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "promptTemplate_1-promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate-llmChain_1-llmChain_1-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "huggingFaceInference_LLMs_0", + "sourceHandle": "huggingFaceInference_LLMs_0-output-huggingFaceInference_LLMs-HuggingFaceInference|LLM|BaseLLM|BaseLanguageModel|BaseLangChain", + "target": "llmChain_1", + "targetHandle": "llmChain_1-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "huggingFaceInference_LLMs_0-huggingFaceInference_LLMs_0-output-huggingFaceInference_LLMs-HuggingFaceInference|LLM|BaseLLM|BaseLanguageModel|BaseLangChain-llmChain_1-llmChain_1-input-model-BaseLanguageModel", + "data": { + "label": "" + } + } + ] +}