From c75d023c1af13f218346f67b2f2e7f09df55003a Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Fri, 23 May 2025 01:05:51 +0100 Subject: [PATCH 01/10] Update APIpie components with the following: - Updated Readme Docs - Create Chat completions request with optional tools - Create image - Create text to speech - Retrieve list of LLM models - Retrieve list of Image models - Retrieve list of Voice models - Retrieve list of Voices --- components/apipie_ai/README.md | 35 +++ .../convert-text-to-speech.mjs | 73 +++++ .../actions/create-image/create-image.mjs | 78 +++++ .../retrieve-available-image-models.mjs | 20 ++ .../retrieve-available-llm-models.mjs | 20 ++ .../retrieve-available-tts-models.mjs | 20 ++ .../retrieve-available-tts-voices-models.mjs | 20 ++ .../send-chat-completion-request.mjs | 188 ++++++++++++ components/apipie_ai/apipie_ai.app.mjs | 281 +++++++++++++++++- components/apipie_ai/common/constants.mjs | 72 +++++ components/apipie_ai/common/utils.mjs | 24 ++ components/apipie_ai/package.json | 8 +- 12 files changed, 832 insertions(+), 7 deletions(-) create mode 100644 components/apipie_ai/README.md create mode 100644 components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs create mode 100644 components/apipie_ai/actions/create-image/create-image.mjs create mode 100644 components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs create mode 100644 components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs create mode 100644 components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs create mode 100644 components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs create mode 100644 components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs create mode 100644 components/apipie_ai/common/constants.mjs create mode 100644 
components/apipie_ai/common/utils.mjs diff --git a/components/apipie_ai/README.md b/components/apipie_ai/README.md new file mode 100644 index 0000000000000..582380f8b0412 --- /dev/null +++ b/components/apipie_ai/README.md @@ -0,0 +1,35 @@ +# Overview + +[APIpie.ai](https://apipie.ai) connects developers with open-source and commercial LLMs via a unified API. With zero infrastructure setup, you can send requests to popular models, switch providers instantly, and explore a growing catalog of AI models—all through one endpoint. Here's an overview of the services offered by [APIpie's API](https://apipie.ai): + +- **Model Discovery**: List and explore available LLM models from various providers as seen on the [APIpie Dashboard](https://apipie.ai/dashboard) +- **Chat Completions**: Send messages to any supported model and receive AI-generated responses + +Use Python or Node.js code to make fully authenticated API requests with your APIpie account, enabling you to prototype, test, or integrate LLM responses into apps, emails, alerts, dashboards, and more. + +# Example Use Cases + +The [APIpie API](https://apipie.ai) can be leveraged in a wide range of business contexts to drive efficiency, enhance customer experiences, and innovate product offerings through unified access to multiple AI models. Here are some specific business use cases for utilizing the APIpie API: + +### **Customer Support Automation** + +Significantly reduce response times and free up human agents to tackle more complex issues by automating customer support ticket responses. Use the List Models action to dynamically select the most appropriate AI model based on ticket complexity or language requirements, then leverage Chat Completions to generate contextual, helpful responses that can be reviewed before sending to customers. + +### **Content Creation and Management** + +Utilize AI to generate high-quality content for blogs, articles, product descriptions, and marketing material. 
Create workflows that automatically test different models using the same prompt to compare writing styles, then select the best output for your brand voice. APIpie's unified interface lets you experiment with various open-source and commercial models without managing multiple API integrations. + +### **Multi-Model AI Experimentation Framework** + +Build intelligent systems that automatically compare AI model performance across different use cases. Set up workflows that send identical prompts to multiple models simultaneously, collect responses in databases, and analyze quality, cost, and latency differences. This enables data-driven decisions about which AI models work best for specific business scenarios while maintaining the flexibility to switch providers as new models become available. + +# Getting Started + +First, sign up for an APIpie account, then in a new workflow step open the APIpie app and select one of the available actions: + +- **List Models**: Fetch the current catalog of available AI models +- **Chat**: Send messages to any supported model and receive responses + +Then connect your APIpie account to Pipedream. Visit [APIpie.ai](https://apipie.ai) and navigate to your profile to generate your [API key.](https://apipie.ai/profile/api-keys) + +Copy your API key and paste it into Pipedream when prompted. Now you're all set to use pre-built actions like `Chat` or `List Models`, or use your APIpie API key directly in Node.js or Python code to access the unified AI model interface. 
diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs new file mode 100644 index 0000000000000..2a724b97165b8 --- /dev/null +++ b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs @@ -0,0 +1,73 @@ +import fs from "fs"; +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-convert-text-to-speech", + name: "Convert Text to Speech (TTS)", + description: "Generates audio from the input text. [See the documentation](https://apipie.ai/docs/Features/Voices)", + version: "0.0.1", + type: "action", + props: { + apipieAi, + model: { + propDefinition: [ + apipieAi, + "ttsModelId", + ], + }, + input: { + propDefinition: [ + apipieAi, + "input", + ], + }, + voice: { + propDefinition: [ + apipieAi, + "voice", + ], + }, + responseFormat: { + propDefinition: [ + apipieAi, + "audioResponseFormat", + ], + }, + speed: { + propDefinition: [ + apipieAi, + "speed", + ], + }, + outputFile: { + type: "string", + label: "Output Filename", + description: "The filename of the output audio file that will be written to the `/tmp` folder, e.g. `/tmp/myFile.mp3`", + }, + }, + async run({ $ }) { + const response = await this.apipieAi.createSpeech({ + $, + data: { + model: this.model, + input: this.input, + voice: this.voice, + response_format: this.responseFormat, + speed: Number(this.speed), + }, + responseType: "arraybuffer", + }); + + const outputFilePath = this.outputFile.includes("tmp/") + ? 
this.outputFile + : `/tmp/${this.outputFile}`; + + await fs.promises.writeFile(outputFilePath, Buffer.from(response)); + + $.export("$summary", "Generated audio successfully"); + return { + outputFilePath, + response, + }; + }, +}; diff --git a/components/apipie_ai/actions/create-image/create-image.mjs b/components/apipie_ai/actions/create-image/create-image.mjs new file mode 100644 index 0000000000000..23282efa6b2e6 --- /dev/null +++ b/components/apipie_ai/actions/create-image/create-image.mjs @@ -0,0 +1,78 @@ +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + name: "Create Image", + version: "0.0.1", + key: "apipie_ai-create-image", + description: "Creates an image given a prompt returning a URL to the image. [See the documentation](https://apipie.ai/docs/Features/Images)", + type: "action", + props: { + apipieAi, + model: { + propDefinition: [ + apipieAi, + "imageModelId", + ], + }, + prompt: { + propDefinition: [ + apipieAi, + "prompt", + ], + }, + responseFormat: { + propDefinition: [ + apipieAi, + "imageResponseFormat", + ], + }, + size: { + propDefinition: [ + apipieAi, + "size", + ], + }, + n: { + propDefinition: [ + apipieAi, + "n", + ], + }, + quality: { + propDefinition: [ + apipieAi, + "quality", + ], + }, + style: { + propDefinition: [ + apipieAi, + "style", + ], + }, + }, + async run({ $ }) { + const response = await this.apipieAi.createImage({ + $, + data: { + prompt: this.prompt, + n: this.n, + size: this.size, + response_format: this.responseFormat === "url" + ? this.responseFormat + : "b64_json", + model: this.model, + quality: this.quality, + style: this.style, + }, + }); + + if (response.data.length) { + $.export("$summary", `Successfully created ${response.data.length} image${response.data.length === 1 + ? 
"" + : "s"}`); + } + + return response; + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs new file mode 100644 index 0000000000000..1c697213b4bbc --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs @@ -0,0 +1,20 @@ +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-retrieve-available-image-models", + name: "Retrieve Available Image Models", + version: "0.0.1", + description: "Returns a list of Image models available through the API. [See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + const response = await this.apipieAi.listImageModels({ + $, + }); + + $.export("$summary", `Successfully retrieved ${response.data.length} available Image model(s)!`); + return response; + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs new file mode 100644 index 0000000000000..52f6a572330c0 --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs @@ -0,0 +1,20 @@ +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-retrieve-available-llm-models", + name: "Retrieve Available LLM Models", + version: "0.0.1", + description: "Returns a list of LLM models available through the API. 
[See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + const response = await this.apipieAi.listLlmModels({ + $, + }); + + $.export("$summary", `Successfully retrieved ${response.data.length} available LLM model(s)!`); + return response; + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs new file mode 100644 index 0000000000000..721f1e46e5685 --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs @@ -0,0 +1,20 @@ +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-retrieve-available-tts-models", + name: "Retrieve Available TTS Models", + version: "0.0.1", + description: "Returns a list of TTS models available through the API. [See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + const response = await this.apipieAi.listTtsModels({ + $, + }); + + $.export("$summary", `Successfully retrieved ${response.data.length} available TTS model(s)!`); + return response; + }, +}; diff --git a/components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs b/components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs new file mode 100644 index 0000000000000..8dd32d18a2e65 --- /dev/null +++ b/components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs @@ -0,0 +1,20 @@ +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-retrieve-available-tts-voices-models", + name: "Retrieve Available TTS Voices", + version: "0.0.1", + description: "Returns a list of TTS Voices available through the API. 
[See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + }, + async run({ $ }) { + const response = await this.apipieAi.listVoices({ + $, + }); + + $.export("$summary", `Successfully retrieved ${response.data.length} available TTS Voices!`); + return response; + }, +}; diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs new file mode 100644 index 0000000000000..c65aa938d9947 --- /dev/null +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -0,0 +1,188 @@ +import { ConfigurationError } from "@pipedream/platform"; +import { parseObject } from "../../common/utils.mjs"; +import constants from "../../common/constants.mjs"; +import apipieAi from "../../apipie_ai.app.mjs"; + +export default { + key: "apipie_ai-send-chat-completion-request", + name: "Send Chat Completion Request", + version: "0.0.1", + description: "Send a chat completion request to a selected LLM model. [See the dashboard](https://apipie.ai/dashboard)", + type: "action", + props: { + apipieAi, + model: { + propDefinition: [ + apipieAi, + "chatCompletionModelId", + ], + }, + messages: { + type: "string[]", + label: "Messages", + description: "A list of objects containing role and content. E.g. **{\"role\":\"user\", \"content\":\"text\"}**. 
[See the documentation](https://apipie.ai/docs/Features/Completions) for further details.", + }, + maxTokens: { + propDefinition: [ + apipieAi, + "maxTokens", + ], + }, + temperature: { + propDefinition: [ + apipieAi, + "temperature", + ], + }, + seed: { + propDefinition: [ + apipieAi, + "seed", + ], + }, + topP: { + propDefinition: [ + apipieAi, + "topP", + ], + }, + topK: { + propDefinition: [ + apipieAi, + "topK", + ], + }, + frequencyPenalty: { + propDefinition: [ + apipieAi, + "frequencyPenalty", + ], + }, + presencePenalty: { + propDefinition: [ + apipieAi, + "presencePenalty", + ], + }, + repetitionPenalty: { + propDefinition: [ + apipieAi, + "repetitionPenalty", + ], + }, + reasoningEffort: { + propDefinition: [ + apipieAi, + "reasoningEffort", + ], + }, + toolTypes: { + type: "string[]", + label: "Tool Types", + description: "The types of tools to enable on the assistant", + options: constants.TOOL_TYPES?.filter((toolType) => toolType === "function") || ["function"], + optional: true, + reloadProps: true, + }, + }, + additionalProps() { + const { + toolTypes, + numberOfFunctions, + } = this; + const props = {}; + + if (toolTypes?.includes("function")) { + props.numberOfFunctions = { + type: "integer", + label: "Number of Functions", + description: "The number of functions to define", + optional: true, + reloadProps: true, + default: 1, + }; + + for (let i = 0; i < (numberOfFunctions || 1); i++) { + props[`functionName_${i}`] = { + type: "string", + label: `Function Name ${i + 1}`, + description: "The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.", + }; + props[`functionDescription_${i}`] = { + type: "string", + label: `Function Description ${i + 1}`, + description: "A description of what the function does, used by the model to choose when and how to call the function.", + optional: true, + }; + props[`functionParameters_${i}`] = { + type: "object", + label: `Function Parameters ${i + 1}`, + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.", + optional: true, + }; + } + } + + return props; + }, + methods: { + _buildTools() { + const tools = this.toolTypes?.filter((toolType) => toolType !== "function")?.map((toolType) => ({ + type: toolType, + })) || []; + + if (this.toolTypes?.includes("function")) { + const numberOfFunctions = this.numberOfFunctions || 1; + for (let i = 0; i < numberOfFunctions; i++) { + if (this[`functionName_${i}`]) { + tools.push({ + type: "function", + function: { + name: this[`functionName_${i}`], + description: this[`functionDescription_${i}`], + parameters: this[`functionParameters_${i}`], + }, + }); + } + } + } + + return tools.length ? 
tools : undefined; + }, + }, + async run({ $ }) { + const data = { + model: this.model, + messages: parseObject(this.messages), + stream: false, + }; + + // Add optional parameters only if they exist + if (this.maxTokens) data.max_tokens = this.maxTokens; + if (this.temperature) data.temperature = parseFloat(this.temperature); + if (this.seed) data.seed = this.seed; + if (this.topP) data.top_p = parseFloat(this.topP); + if (this.topK) data.top_k = this.topK; + if (this.frequencyPenalty) data.frequency_penalty = parseFloat(this.frequencyPenalty); + if (this.presencePenalty) data.presence_penalty = parseFloat(this.presencePenalty); + if (this.repetitionPenalty) data.repetition_penalty = parseFloat(this.repetitionPenalty); + if (this.reasoningEffort) data.reasoning_effort = this.reasoningEffort; + + // Add tools if they exist + const tools = this._buildTools(); + if (tools) data.tools = tools; + + const response = await this.apipieAi.sendChatCompetionRequest({ + $, + data, + timeout: 1000 * 60 * 5, + }); + + if (response.error) { + throw new ConfigurationError(response.error.message); + } + + $.export("$summary", `A new chat completion request with Id: ${response.id} was successfully created!`); + return response; + }, +}; diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index dc830e6a1a949..9b3074912cd7a 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -1,11 +1,282 @@ +import { axios } from "@pipedream/platform"; +import constants from "./common/constants.mjs"; + export default { type: "app", app: "apipie_ai", - propDefinitions: {}, + propDefinitions: { + chatCompletionModelId: { + type: "string", + label: "Completions Model", + description: "The ID of the LLM model to use for completions.", + async options() { + const { data } = await this.listLlmModels(); + + return data.map(({ + id: value, name: label, + }) => ({ + label, + value, + })); + }, + }, + imageModelId: { + 
type: "string", + label: "Model", + description: "The ID of the image model to use for completions.", + async options() { + const { data } = await this.listImageModels(); + + return data.map(({ + id: value, name: label, + }) => ({ + label, + value, + })); + }, + }, + ttsModelId: { + type: "string", + label: "Model", + description: "The ID of the tts model to use for completions.", + async options() { + const { data } = await this.listTtsModels(); + + return data.map(({ + id: value, name: label, + }) => ({ + label, + value, + })); + }, + }, + maxTokens: { + type: "integer", + label: "Max Tokens", + description: "Maximum number of tokens. **(range: [1, context_length))**.", + min: 1, + optional: true, + }, + temperature: { + type: "string", + label: "Temperature", + description: "Sampling temperature. **(range: [0, 2])**.", + optional: true, + }, + seed: { + type: "integer", + label: "Seed", + description: "Seed for deterministic outputs.", + optional: true, + }, + topP: { + type: "string", + label: "Top P", + description: "Top-p sampling value. **(range: (0, 1])**.", + optional: true, + }, + topK: { + type: "integer", + label: "Top K", + description: "Top-k sampling value. **(range: [1, Infinity))**.", + min: 1, + optional: true, + }, + frequencyPenalty: { + type: "string", + label: "Frequency Penalty", + description: "Frequency penalty. **(range: [-2, 2])**.", + optional: true, + }, + presencePenalty: { + type: "string", + label: "Presence Penalty", + description: "Presence penalty. **(range: [-2, 2])**.", + optional: true, + }, + repetitionPenalty: { + type: "string", + label: "Repetition Penalty", + description: "Repetition penalty. **(range: (0, 2])**.", + optional: true, + }, + reasoningEffort: { + type: "string", + label: "Reasoning Effort", + description: "OpenAI-style reasoning effort setting.", + options: constants.EFFORT_OPTIONS, + optional: true, + }, + input: { + type: "string", + label: "Input", + description: "The text to generate audio for. 
The maximum length is 4096 characters.", + }, + voice: { + type: "string", + label: "Voice", + description: "The voice to use when generating the audio.", + async options(opts) { + // Get the selected model from the component props + const model = opts.model || this.model; + if (!model) { + return []; + } + + const { data } = await this.listVoices({ model }); + + return data.map(({ + voice_id: value, name: label, + }) => ({ + label, + value, + })); + }, + }, + audioResponseFormat: { + type: "string", + label: "Response Format", + description: "The format to generate audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.", + options: constants.AUDIO_RESPONSE_FORMATS, + optional: true, + }, + speed: { + type: "string", + label: "Speed", + description: "The speed of the generated audio. Provide a value from 0.25 to 4.0.", + default: "1.0", + optional: true, + }, + toolOutputs: { + type: "string[]", + label: "Tool Outputs", + description: "The outputs from the tool calls. Each object in the array should contain properties `tool_call_id` and `output`.", + }, + prompt: { + label: "Prompt", + description: "A text description of the desired image(s).", + type: "string", + }, + imageResponseFormat: { + label: "Response Format", + description: "The format in which the generated images are returned.", + type: "string", + optional: true, + options: constants.IMAGE_RESPONSE_FORMATS, + default: "url", + reloadProps: true, + }, + size: { + label: "Size", + description: "The size of the generated images.", + type: "string", + optional: true, + options: constants.IMAGE_SIZES, + default: "1024x1024", + }, + n: { + type: "integer", + label: "N", + description: "The number of images to generate. Must be between 1 and 10. 
not supported for all models.", + optional: true, + default: 1, + }, + quality: { + type: "string", + label: "Quality", + description: "The quality of the image", + options: constants.IMAGE_QUALITIES, + optional: true, + default: "standard", + }, + style: { + type: "string", + label: "Style", + description: "The style of the image, not supported for all models.", + options: constants.IMAGE_STYLES, + optional: true, + default: "natural", + }, + }, methods: { - // this.$auth contains connected account data - authKeys() { - console.log(Object.keys(this.$auth)); + _apiKey() { + return this.$auth.api_key; + }, + _apiUrl() { + return "https://apipie.ai/v1"; + }, + _getHeaders() { + return { + "Authorization": `Bearer ${this._apiKey()}`, + "Accept": "application/json", + "User-Agent": "@PipedreamHQ/pipedream v1.0", + }; + }, + _makeRequest({ + $ = this, path, ...opts + }) { + return axios($, { + url: `${this._apiUrl()}/${path}`, + headers: this._getHeaders(), + ...opts, + }); + }, + listLlmModels() { + return this._makeRequest({ + path: "models?type=llm", + }); + }, + listImageModels() { + return this._makeRequest({ + path: "models?type=image", + }); + }, + listEmbeddingModels() { + return this._makeRequest({ + path: "models?type=embedding", + }); + }, + listTtsModels() { + return this._makeRequest({ + path: "models?subtype=text-to-speech", + }); + }, + listVoices(opts = {}) { + let queryString = "voices"; + if (opts.model) { + queryString += `&model=${encodeURIComponent(opts.model)}`; + } + return this._makeRequest({ + path: `models?${queryString}`, + }); + }, + sendChatCompetionRequest(opts = {}) { + return this._makeRequest({ + method: "POST", + path: "chat/completions", + ...opts, + }); + }, + createImage(args = {}) { + return this._makeRequest({ + path: "/images/generations", + method: "POST", + ...args, + }); + }, + createSpeech(args = {}) { + return this._makeRequest({ + path: "/audio/speech", + method: "POST", + ...args, + }); + }, + createEmbeddings(args = {}) { 
+ return this._makeRequest({ + path: "/embeddings", + method: "POST", + ...args, + }); }, }, -}; \ No newline at end of file +}; diff --git a/components/apipie_ai/common/constants.mjs b/components/apipie_ai/common/constants.mjs new file mode 100644 index 0000000000000..d2a1b286bd30c --- /dev/null +++ b/components/apipie_ai/common/constants.mjs @@ -0,0 +1,72 @@ +const EFFORT_OPTIONS = [ + "high", + "medium", + "low", +]; + +const AUDIO_RESPONSE_FORMATS = [ + "mp3", + "opus", + "aac", + "flac", + "wav", + "pcm", +]; + +const IMAGE_RESPONSE_FORMATS = [ + { + label: "URL", + value: "url", + }, + { + label: "Base64 JSON", + value: "b64_json", + }, +]; + + +const IMAGE_QUALITIES = [ + { + label: "Standard", + value: "standard", + }, + { + label: "HD", + value: "hd", + }, +]; + +const IMAGE_STYLES = [ + { + label: "Natural", + value: "natural", + }, + { + label: "Vivid", + value: "vivid", + }, +]; + +const IMAGE_SIZES = [ + "256x256", + "512x512", + "1024x1024", + "1792x1024", + "1024x1792", +]; +const TOOL_TYPES = [ + "code_interpreter", + "file_search", + "function", +]; + + +export default { + EFFORT_OPTIONS, + AUDIO_RESPONSE_FORMATS, + IMAGE_RESPONSE_FORMATS, + IMAGE_QUALITIES, + IMAGE_STYLES, + IMAGE_SIZES, + TOOL_TYPES, +}; diff --git a/components/apipie_ai/common/utils.mjs b/components/apipie_ai/common/utils.mjs new file mode 100644 index 0000000000000..dcc9cc61f6f41 --- /dev/null +++ b/components/apipie_ai/common/utils.mjs @@ -0,0 +1,24 @@ +export const parseObject = (obj) => { + if (!obj) return undefined; + + if (Array.isArray(obj)) { + return obj.map((item) => { + if (typeof item === "string") { + try { + return JSON.parse(item); + } catch (e) { + return item; + } + } + return item; + }); + } + if (typeof obj === "string") { + try { + return JSON.parse(obj); + } catch (e) { + return obj; + } + } + return obj; +}; diff --git a/components/apipie_ai/package.json b/components/apipie_ai/package.json index 0fc068179e4ae..a1c88ca2379e3 100644 --- 
a/components/apipie_ai/package.json +++ b/components/apipie_ai/package.json @@ -5,11 +5,15 @@ "main": "apipie_ai.app.mjs", "keywords": [ "pipedream", - "apipie_ai" + "apipie ai" ], - "homepage": "https://pipedream.com/apps/apipie_ai", + "homepage": "https://pipedream.com/apps/apipie-ai", "author": "Pipedream (https://pipedream.com/)", "publishConfig": { "access": "public" + }, + "dependencies": { + "@pipedream/platform": "^3.0.3", + "axios": "^1.6.2" } } From 889292dd036cbf4306823a622fcc6a866d0acdd7 Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Fri, 23 May 2025 01:55:13 +0100 Subject: [PATCH 02/10] Fixed Typo and URL / issue --- .../send-chat-completion-request.mjs | 2 +- components/apipie_ai/apipie_ai.app.mjs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs index c65aa938d9947..0bcb0ad8118be 100644 --- a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -172,7 +172,7 @@ export default { const tools = this._buildTools(); if (tools) data.tools = tools; - const response = await this.apipieAi.sendChatCompetionRequest({ + const response = await this.apipieAi.sendChatCompletionRequest({ $, data, timeout: 1000 * 60 * 5, diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index 9b3074912cd7a..1be269ebdfa89 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -250,7 +250,7 @@ export default { path: `models?${queryString}`, }); }, - sendChatCompetionRequest(opts = {}) { + sendChatCompletionRequest(opts = {}) { return this._makeRequest({ method: "POST", path: "chat/completions", @@ -259,21 +259,21 @@ 
export default { }, createImage(args = {}) { return this._makeRequest({ - path: "/images/generations", + path: "images/generations", method: "POST", ...args, }); }, createSpeech(args = {}) { return this._makeRequest({ - path: "/audio/speech", + path: "audio/speech", method: "POST", ...args, }); }, createEmbeddings(args = {}) { return this._makeRequest({ - path: "/embeddings", + path: "embeddings", method: "POST", ...args, }); From 92f4ee3468f1f7f197dd2752b16a95e3e54a66be Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Fri, 23 May 2025 20:25:17 +0100 Subject: [PATCH 03/10] Update list generation to be unique and sorted --- components/apipie_ai/README.md | 29 ++++-- .../convert-text-to-speech.mjs | 31 +++++-- components/apipie_ai/apipie_ai.app.mjs | 90 ++++++++----------- 3 files changed, 81 insertions(+), 69 deletions(-) diff --git a/components/apipie_ai/README.md b/components/apipie_ai/README.md index 582380f8b0412..53410f80ccf60 100644 --- a/components/apipie_ai/README.md +++ b/components/apipie_ai/README.md @@ -1,11 +1,13 @@ # Overview -[APIpie.ai](https://apipie.ai) connects developers with open-source and commercial LLMs via a unified API. With zero infrastructure setup, you can send requests to popular models, switch providers instantly, and explore a growing catalog of AI models—all through one endpoint. Here's an overview of the services offered by [APIpie's API](https://apipie.ai): +[APIpie.ai](https://apipie.ai) connects developers with open-source and commercial AI models via a unified API. With zero infrastructure setup, you can send requests to popular models, switch providers instantly, and explore a growing catalog of AI models—all through one endpoint. 
Here's an overview of the services offered by [APIpie's API](https://apipie.ai): -- **Model Discovery**: List and explore available LLM models from various providers as seen on the [APIpie Dashboard](https://apipie.ai/dashboard) +- **Model Discovery**: List and explore available LLM models, image models, voice models, and voices from various providers as seen on the [APIpie Dashboard](https://apipie.ai/dashboard) - **Chat Completions**: Send messages to any supported model and receive AI-generated responses +- **Image Generation**: Create images using AI image generation models +- **Text-to-Speech**: Convert text to speech using various voice models and voices -Use Python or Node.js code to make fully authenticated API requests with your APIpie account, enabling you to prototype, test, or integrate LLM responses into apps, emails, alerts, dashboards, and more. +Use Python or Node.js code to make fully authenticated API requests with your APIpie account, enabling you to prototype, test, or integrate AI-generated content including text, images, and speech into apps, emails, alerts, dashboards, and more. # Example Use Cases @@ -13,23 +15,32 @@ The [APIpie API](https://apipie.ai) can be leveraged in a wide range of business ### **Customer Support Automation** -Significantly reduce response times and free up human agents to tackle more complex issues by automating customer support ticket responses. Use the List Models action to dynamically select the most appropriate AI model based on ticket complexity or language requirements, then leverage Chat Completions to generate contextual, helpful responses that can be reviewed before sending to customers. +Significantly reduce response times and free up human agents to tackle more complex issues by automating customer support ticket responses. 
Use the List Models actions to dynamically select the most appropriate AI model based on ticket complexity or language requirements, then leverage Chat Completions to generate contextual, helpful responses that can be reviewed before sending to customers. ### **Content Creation and Management** -Utilize AI to generate high-quality content for blogs, articles, product descriptions, and marketing material. Create workflows that automatically test different models using the same prompt to compare writing styles, then select the best output for your brand voice. APIpie's unified interface lets you experiment with various open-source and commercial models without managing multiple API integrations. +Utilize AI to generate high-quality content for blogs, articles, product descriptions, and marketing material. Create workflows that automatically test different models using the same prompt to compare writing styles, then select the best output for your brand voice. Generate accompanying images and convert text to speech for multimedia content creation. APIpie's unified interface lets you experiment with various open-source and commercial models without managing multiple API integrations. + +### **Creative Asset Generation** + +Generate visual content and audio assets for marketing campaigns, presentations, and social media. Use image generation models to create custom graphics, illustrations, and visual content that align with your brand. Convert written content to speech using different voice models to create podcasts, audiobooks, or accessibility features for your applications. ### **Multi-Model AI Experimentation Framework** -Build intelligent systems that automatically compare AI model performance across different use cases. Set up workflows that send identical prompts to multiple models simultaneously, collect responses in databases, and analyze quality, cost, and latency differences. 
This enables data-driven decisions about which AI models work best for specific business scenarios while maintaining the flexibility to switch providers as new models become available. +Build intelligent systems that automatically compare AI model performance across different use cases and modalities. Set up workflows that test text generation, image creation, and voice synthesis across multiple models simultaneously, collect responses in databases, and analyze quality, cost, and latency differences. This enables data-driven decisions about which AI models work best for specific business scenarios while maintaining the flexibility to switch providers as new models become available. # Getting Started First, sign up for an APIpie account, then in a new workflow step open the APIpie app and select one of the available actions: -- **List Models**: Fetch the current catalog of available AI models -- **Chat**: Send messages to any supported model and receive responses +- **List LLM Models**: Fetch the current catalog of available language models +- **List Image Models**: Fetch the current catalog of available image generation models +- **List Voice Models**: Fetch the current catalog of available voice models +- **List Voices**: Fetch the available voices for text-to-speech +- **Chat**: Send messages to any supported language model and receive responses +- **Create Image**: Generate images using AI image generation models +- **Create Text to Speech**: Convert text to speech using various voice models and voices Then connect your APIpie account to Pipedream. Visit [APIpie.ai](https://apipie.ai) and navigate to your profile to generate your [API key.](https://apipie.ai/profile/api-keys) -Copy your API key and paste it into Pipedream when prompted. Now you're all set to use pre-built actions like `Chat` or `List Models`, or use your APIpie API key directly in Node.js or Python code to access the unified AI model interface. 
+Copy your API key and paste it into Pipedream when prompted. Now you're all set to use pre-built actions like `Chat`, `Create Image`, `Create Text to Speech`, or any of the list actions, or use your APIpie API key directly in Node.js or Python code to access the unified AI model interface. diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs index 2a724b97165b8..4dc9378c088d2 100644 --- a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs +++ b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs @@ -14,6 +14,7 @@ export default { apipieAi, "ttsModelId", ], + reloadProps: true, }, input: { propDefinition: [ @@ -21,12 +22,6 @@ export default { "input", ], }, - voice: { - propDefinition: [ - apipieAi, - "voice", - ], - }, responseFormat: { propDefinition: [ apipieAi, @@ -45,6 +40,30 @@ export default { description: "The filename of the output audio file that will be written to the `/tmp` folder, e.g. `/tmp/myFile.mp3`", }, }, + async additionalProps() { + const props = {}; + if (this.model) { + const { data } = await this.apipieAi.listVoices({ model: this.model }); + const uniqueVoices = new Map(); + data.forEach(({ voice_id, name, description }) => { + if (!uniqueVoices.has(voice_id)) { + uniqueVoices.set(voice_id, { name, description }); + } + }); + props.voice = { + type: "string", + label: "Voice", + description: "The voice to use when generating the audio.", + options: Array.from(uniqueVoices.entries()) + .map(([value, { name, description }]) => ({ + label: description ? 
`${name} - ${description}` : name, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)), + }; + } + return props; + }, async run({ $ }) { const response = await this.apipieAi.createSpeech({ $, diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index 1be269ebdfa89..d0dcfd663946c 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -11,13 +11,18 @@ export default { description: "The ID of the LLM model to use for completions.", async options() { const { data } = await this.listLlmModels(); - - return data.map(({ - id: value, name: label, - }) => ({ - label, - value, - })); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); }, }, imageModelId: { @@ -26,13 +31,18 @@ export default { description: "The ID of the image model to use for completions.", async options() { const { data } = await this.listImageModels(); - - return data.map(({ - id: value, name: label, - }) => ({ - label, - value, - })); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); }, }, ttsModelId: { @@ -41,13 +51,18 @@ export default { description: "The ID of the tts model to use for completions.", async options() { const { data } = await this.listTtsModels(); - - return data.map(({ - id: value, name: label, - }) => ({ - label, - value, - })); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + 
.map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); }, }, maxTokens: { @@ -112,27 +127,6 @@ export default { label: "Input", description: "The text to generate audio for. The maximum length is 4096 characters.", }, - voice: { - type: "string", - label: "Voice", - description: "The voice to use when generating the audio.", - async options(opts) { - // Get the selected model from the component props - const model = opts.model || this.model; - if (!model) { - return []; - } - - const { data } = await this.listVoices({ model }); - - return data.map(({ - voice_id: value, name: label, - }) => ({ - label, - value, - })); - }, - }, audioResponseFormat: { type: "string", label: "Response Format", @@ -231,11 +225,6 @@ export default { path: "models?type=image", }); }, - listEmbeddingModels() { - return this._makeRequest({ - path: "models?type=embedding", - }); - }, listTtsModels() { return this._makeRequest({ path: "models?subtype=text-to-speech", @@ -271,12 +260,5 @@ export default { ...args, }); }, - createEmbeddings(args = {}) { - return this._makeRequest({ - path: "embeddings", - method: "POST", - ...args, - }); - }, }, }; From 07b716005f2c31fb393f298c34b174c90e53b68e Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Sat, 24 May 2025 06:38:29 +0100 Subject: [PATCH 04/10] made code rabbit suggested changes --- components/apipie_ai/README.md | 22 +-- .../convert-text-to-speech.mjs | 31 +++- .../actions/create-image/create-image.mjs | 9 +- .../retrieve-available-tts-voices.mjs} | 2 +- .../send-chat-completion-request.mjs | 19 +-- components/apipie_ai/apipie_ai.app.mjs | 143 +++++++++--------- components/apipie_ai/common/constants.mjs | 8 - components/apipie_ai/common/utils.mjs | 5 + 8 files changed, 126 insertions(+), 113 deletions(-) rename components/apipie_ai/actions/{retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs => 
retrieve-available-tts-voices/retrieve-available-tts-voices.mjs} (90%) diff --git a/components/apipie_ai/README.md b/components/apipie_ai/README.md index 53410f80ccf60..cd96921619971 100644 --- a/components/apipie_ai/README.md +++ b/components/apipie_ai/README.md @@ -13,33 +13,33 @@ Use Python or Node.js code to make fully authenticated API requests with your AP The [APIpie API](https://apipie.ai) can be leveraged in a wide range of business contexts to drive efficiency, enhance customer experiences, and innovate product offerings through unified access to multiple AI models. Here are some specific business use cases for utilizing the APIpie API: -### **Customer Support Automation** +## **Customer Support Automation** Significantly reduce response times and free up human agents to tackle more complex issues by automating customer support ticket responses. Use the List Models actions to dynamically select the most appropriate AI model based on ticket complexity or language requirements, then leverage Chat Completions to generate contextual, helpful responses that can be reviewed before sending to customers. -### **Content Creation and Management** +## **Content Creation and Management** Utilize AI to generate high-quality content for blogs, articles, product descriptions, and marketing material. Create workflows that automatically test different models using the same prompt to compare writing styles, then select the best output for your brand voice. Generate accompanying images and convert text to speech for multimedia content creation. APIpie's unified interface lets you experiment with various open-source and commercial models without managing multiple API integrations. -### **Creative Asset Generation** +## **Creative Asset Generation** Generate visual content and audio assets for marketing campaigns, presentations, and social media. Use image generation models to create custom graphics, illustrations, and visual content that align with your brand. 
Convert written content to speech using different voice models to create podcasts, audiobooks, or accessibility features for your applications. -### **Multi-Model AI Experimentation Framework** +## **Multi-Model AI Experimentation Framework** -Build intelligent systems that automatically compare AI model performance across different use cases and modalities. Set up workflows that test text generation, image creation, and voice synthesis across multiple models simultaneously, collect responses in databases, and analyze quality, cost, and latency differences. This enables data-driven decisions about which AI models work best for specific business scenarios while maintaining the flexibility to switch providers as new models become available. +Build intelligent systems that automatically compare AI model performance across different use cases and modalities. Set up workflows that test text generation, image creation, and voice synthesis across multiple models simultaneously, collect responses in databases, and analyze quality, cost, and latency differences. This enables data-driven decisions about which AI models work best for specific business scenarios, while maintaining the flexibility to switch providers as new models become available. 
# Getting Started First, sign up for an APIpie account, then in a new workflow step open the APIpie app and select one of the available actions: -- **List LLM Models**: Fetch the current catalog of available language models -- **List Image Models**: Fetch the current catalog of available image generation models -- **List Voice Models**: Fetch the current catalog of available voice models -- **List Voices**: Fetch the available voices for text-to-speech -- **Chat**: Send messages to any supported language model and receive responses +- **Retrieve Available Image Models**: Fetch the current catalog of available image generation models +- **Retrieve Available LLM Models**: Fetch the current catalog of available language models +- **Retrieve Available TTS Models**: Fetch the current catalog of available voice models +- **Retrieve Available TTS Voices**: Fetch the available voices for text-to-speech +- **Send Chat Completion Request**: Send messages to any supported language model and receive responses - **Create Image**: Generate images using AI image generation models -- **Create Text to Speech**: Convert text to speech using various voice models and voices +- **Convert Text to Speech (TTS)**: Convert text to speech using various voice models and voices Then connect your APIpie account to Pipedream. 
Visit [APIpie.ai](https://apipie.ai) and navigate to your profile to generate your [API key.](https://apipie.ai/profile/api-keys) diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs index 4dc9378c088d2..0693c9853e47e 100644 --- a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs +++ b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs @@ -12,8 +12,11 @@ export default { model: { propDefinition: [ apipieAi, - "ttsModelId", + "modelId", + { modelType: "tts" }, ], + label: "TTS Model", + description: "The text-to-speech model to use.", reloadProps: true, }, input: { @@ -43,20 +46,28 @@ export default { async additionalProps() { const props = {}; if (this.model) { - const { data } = await this.apipieAi.listVoices({ model: this.model }); + // Parse the model JSON to get id and route + const modelData = JSON.parse(this.model); + const { route } = modelData; + + // Get all voices and filter by the model route + const { data } = await this.apipieAi.listVoices(); + const filteredVoices = data.filter(voice => voice.model === route); + const uniqueVoices = new Map(); - data.forEach(({ voice_id, name, description }) => { + filteredVoices.forEach(({ voice_id, name }) => { if (!uniqueVoices.has(voice_id)) { - uniqueVoices.set(voice_id, { name, description }); + uniqueVoices.set(voice_id, name); } }); + props.voice = { type: "string", label: "Voice", description: "The voice to use when generating the audio.", options: Array.from(uniqueVoices.entries()) - .map(([value, { name, description }]) => ({ - label: description ? 
`${name} - ${description}` : name, + .map(([value, name]) => ({ + label: name, value, })) .sort((a, b) => a.label.localeCompare(b.label)), @@ -65,14 +76,18 @@ export default { return props; }, async run({ $ }) { + // Parse the model JSON to get the actual model id for the API call + const modelData = JSON.parse(this.model); + const { id: modelId } = modelData; + const response = await this.apipieAi.createSpeech({ $, data: { - model: this.model, + model: modelId, input: this.input, voice: this.voice, response_format: this.responseFormat, - speed: Number(this.speed), + speed: this.speed, }, responseType: "arraybuffer", }); diff --git a/components/apipie_ai/actions/create-image/create-image.mjs b/components/apipie_ai/actions/create-image/create-image.mjs index 23282efa6b2e6..5a13761d17a3b 100644 --- a/components/apipie_ai/actions/create-image/create-image.mjs +++ b/components/apipie_ai/actions/create-image/create-image.mjs @@ -11,8 +11,11 @@ export default { model: { propDefinition: [ apipieAi, - "imageModelId", + "modelId", + { modelType: "image" }, ], + label: "Image Model", + description: "The image generation model to use.", }, prompt: { propDefinition: [ @@ -58,9 +61,7 @@ export default { prompt: this.prompt, n: this.n, size: this.size, - response_format: this.responseFormat === "url" - ? 
this.responseFormat - : "b64_json", + ...(this.responseFormat && { response_format: this.responseFormat }), model: this.model, quality: this.quality, style: this.style, diff --git a/components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs similarity index 90% rename from components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs rename to components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs index 8dd32d18a2e65..44047a6138f01 100644 --- a/components/apipie_ai/actions/retrieve-available-tts-voices-models/retrieve-available-tts-voices-models.mjs +++ b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs @@ -1,7 +1,7 @@ import apipieAi from "../../apipie_ai.app.mjs"; export default { - key: "apipie_ai-retrieve-available-tts-voices-models", + key: "apipie_ai-retrieve-available-tts-voices", name: "Retrieve Available TTS Voices", version: "0.0.1", description: "Returns a list of TTS Voices available through the API. 
[See the dashboard](https://apipie.ai/dashboard)", diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs index 0bcb0ad8118be..4be9aee2d916d 100644 --- a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -14,8 +14,11 @@ export default { model: { propDefinition: [ apipieAi, - "chatCompletionModelId", + "modelId", + { modelType: "llm" }, ], + label: "LLM Model", + description: "The LLM model to use for completions.", }, messages: { type: "string[]", @@ -127,9 +130,7 @@ export default { }, methods: { _buildTools() { - const tools = this.toolTypes?.filter((toolType) => toolType !== "function")?.map((toolType) => ({ - type: toolType, - })) || []; + const tools = []; if (this.toolTypes?.includes("function")) { const numberOfFunctions = this.numberOfFunctions || 1; @@ -159,13 +160,13 @@ export default { // Add optional parameters only if they exist if (this.maxTokens) data.max_tokens = this.maxTokens; - if (this.temperature) data.temperature = parseFloat(this.temperature); + if (this.temperature) data.temperature = this.temperature; if (this.seed) data.seed = this.seed; - if (this.topP) data.top_p = parseFloat(this.topP); + if (this.topP) data.top_p = this.topP; if (this.topK) data.top_k = this.topK; - if (this.frequencyPenalty) data.frequency_penalty = parseFloat(this.frequencyPenalty); - if (this.presencePenalty) data.presence_penalty = parseFloat(this.presencePenalty); - if (this.repetitionPenalty) data.repetition_penalty = parseFloat(this.repetitionPenalty); + if (this.frequencyPenalty) data.frequency_penalty = this.frequencyPenalty; + if (this.presencePenalty) data.presence_penalty = this.presencePenalty; + if (this.repetitionPenalty) data.repetition_penalty = this.repetitionPenalty; if 
(this.reasoningEffort) data.reasoning_effort = this.reasoningEffort; // Add tools if they exist diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index d0dcfd663946c..12d1776eeb8f1 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -5,64 +5,54 @@ export default { type: "app", app: "apipie_ai", propDefinitions: { - chatCompletionModelId: { - type: "string", - label: "Completions Model", - description: "The ID of the LLM model to use for completions.", - async options() { - const { data } = await this.listLlmModels(); - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); - }, - }, - imageModelId: { - type: "string", - label: "Model", - description: "The ID of the image model to use for completions.", - async options() { - const { data } = await this.listImageModels(); - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); - }, - }, - ttsModelId: { + modelId: { type: "string", label: "Model", - description: "The ID of the tts model to use for completions.", - async options() { - const { data } = await this.listTtsModels(); - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); + description: "The ID of the model to use.", + async options(opts) { + const { modelType } = opts; + + // 
Determine which API call to make based on modelType + let data; + if (modelType === "llm") { + ({ data } = await this.listLlmModels()); + } else if (modelType === "image") { + ({ data } = await this.listImageModels()); + } else if (modelType === "tts") { + ({ data } = await this.listTtsModels()); + } else { + return []; + } + + // Handle TTS models differently (they need route information) + if (modelType === "tts") { + const uniqueModels = new Map(); + data.forEach(({ id, name, route }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, { name, route }); + } + }); + return Array.from(uniqueModels.entries()) + .map(([id, { name, route }]) => ({ + label: name, + value: JSON.stringify({ id, route }), + })) + .sort((a, b) => a.label.localeCompare(b.label)); + } else { + // Handle LLM and image models (simple id/name mapping) + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); + } }, }, maxTokens: { @@ -73,46 +63,56 @@ export default { optional: true, }, temperature: { - type: "string", + type: "number", label: "Temperature", description: "Sampling temperature. **(range: [0, 2])**.", + min: 0, + max: 2, optional: true, }, seed: { - type: "integer", + type: "number", label: "Seed", description: "Seed for deterministic outputs.", optional: true, }, topP: { - type: "string", + type: "number", label: "Top P", description: "Top-p sampling value. **(range: (0, 1])**.", + min: 0, + max: 1.0, optional: true, }, topK: { - type: "integer", + type: "number", label: "Top K", description: "Top-k sampling value. **(range: [1, Infinity))**.", min: 1, optional: true, }, frequencyPenalty: { - type: "string", + type: "number", label: "Frequency Penalty", description: "Frequency penalty. 
**(range: [-2, 2])**.", + min: -2.0, + max: 2.0, optional: true, }, presencePenalty: { - type: "string", + type: "number", label: "Presence Penalty", description: "Presence penalty. **(range: [-2, 2])**.", + min: -2.0, + max: 2.0, optional: true, }, repetitionPenalty: { - type: "string", + type: "number", label: "Repetition Penalty", description: "Repetition penalty. **(range: (0, 2])**.", + min: -2.0, + max: 2.0, optional: true, }, reasoningEffort: { @@ -135,10 +135,12 @@ export default { optional: true, }, speed: { - type: "string", + type: "number", label: "Speed", description: "The speed of the generated audio. Provide a value from 0.25 to 4.0.", - default: "1.0", + default: 1, + min: 0.25, + max: 4.0, optional: true, }, toolOutputs: { @@ -207,8 +209,9 @@ export default { }; }, _makeRequest({ - $ = this, path, ...opts + $, path, ...opts }) { + $ = $ || this.$; return axios($, { url: `${this._apiUrl()}/${path}`, headers: this._getHeaders(), @@ -230,13 +233,9 @@ export default { path: "models?subtype=text-to-speech", }); }, - listVoices(opts = {}) { - let queryString = "voices"; - if (opts.model) { - queryString += `&model=${encodeURIComponent(opts.model)}`; - } + listVoices() { return this._makeRequest({ - path: `models?${queryString}`, + path: "models?voices", }); }, sendChatCompletionRequest(opts = {}) { diff --git a/components/apipie_ai/common/constants.mjs b/components/apipie_ai/common/constants.mjs index d2a1b286bd30c..85a04092b0d0b 100644 --- a/components/apipie_ai/common/constants.mjs +++ b/components/apipie_ai/common/constants.mjs @@ -3,7 +3,6 @@ const EFFORT_OPTIONS = [ "medium", "low", ]; - const AUDIO_RESPONSE_FORMATS = [ "mp3", "opus", @@ -12,7 +11,6 @@ const AUDIO_RESPONSE_FORMATS = [ "wav", "pcm", ]; - const IMAGE_RESPONSE_FORMATS = [ { label: "URL", @@ -23,8 +21,6 @@ const IMAGE_RESPONSE_FORMATS = [ value: "b64_json", }, ]; - - const IMAGE_QUALITIES = [ { label: "Standard", @@ -35,7 +31,6 @@ const IMAGE_QUALITIES = [ value: "hd", }, ]; - const 
IMAGE_STYLES = [ { label: "Natural", @@ -46,7 +41,6 @@ const IMAGE_STYLES = [ value: "vivid", }, ]; - const IMAGE_SIZES = [ "256x256", "512x512", @@ -59,8 +53,6 @@ const TOOL_TYPES = [ "file_search", "function", ]; - - export default { EFFORT_OPTIONS, AUDIO_RESPONSE_FORMATS, diff --git a/components/apipie_ai/common/utils.mjs b/components/apipie_ai/common/utils.mjs index dcc9cc61f6f41..6631ff58c328e 100644 --- a/components/apipie_ai/common/utils.mjs +++ b/components/apipie_ai/common/utils.mjs @@ -1,3 +1,8 @@ +/** +* Safely parses JSON strings or arrays of JSON strings into JavaScript objects +* @param {any} obj - Input that may be a JSON string, array of JSON strings, or any other value +* @returns {any} - Parsed object(s) or the original input if parsing fails +*/ export const parseObject = (obj) => { if (!obj) return undefined; From 83f4701904c3fa4cf77ab79ae9ed493b4ecb1831 Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Sat, 24 May 2025 06:50:33 +0100 Subject: [PATCH 05/10] Reverted model look up to simplifed direct calls --- .../convert-text-to-speech.mjs | 5 +- .../actions/create-image/create-image.mjs | 5 +- .../send-chat-completion-request.mjs | 5 +- components/apipie_ai/apipie_ai.app.mjs | 102 ++++++++++-------- 4 files changed, 59 insertions(+), 58 deletions(-) diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs index 0693c9853e47e..67b92ec784119 100644 --- a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs +++ b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs @@ -12,11 +12,8 @@ export default { model: { propDefinition: [ apipieAi, - "modelId", - { modelType: "tts" }, + "ttsModelId", ], - label: "TTS Model", - description: "The text-to-speech model to use.", reloadProps: true, }, input: { diff --git 
a/components/apipie_ai/actions/create-image/create-image.mjs b/components/apipie_ai/actions/create-image/create-image.mjs index 5a13761d17a3b..c72fde39b6986 100644 --- a/components/apipie_ai/actions/create-image/create-image.mjs +++ b/components/apipie_ai/actions/create-image/create-image.mjs @@ -11,11 +11,8 @@ export default { model: { propDefinition: [ apipieAi, - "modelId", - { modelType: "image" }, + "imageModelId", ], - label: "Image Model", - description: "The image generation model to use.", }, prompt: { propDefinition: [ diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs index 4be9aee2d916d..e24f290131a69 100644 --- a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -14,11 +14,8 @@ export default { model: { propDefinition: [ apipieAi, - "modelId", - { modelType: "llm" }, + "chatCompletionModelId", ], - label: "LLM Model", - description: "The LLM model to use for completions.", }, messages: { type: "string[]", diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index 12d1776eeb8f1..64cd0c91d29fb 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -5,54 +5,64 @@ export default { type: "app", app: "apipie_ai", propDefinitions: { - modelId: { + chatCompletionModelId: { + type: "string", + label: "Completions Model", + description: "The ID of the LLM model to use for completions.", + async options() { + const { data } = await this.listLlmModels(); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, 
b) => a.label.localeCompare(b.label)); + }, + }, + imageModelId: { + type: "string", + label: "Model", + description: "The ID of the image model to use for completions.", + async options() { + const { data } = await this.listImageModels(); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); + }, + }, + ttsModelId: { type: "string", label: "Model", - description: "The ID of the model to use.", - async options(opts) { - const { modelType } = opts; - - // Determine which API call to make based on modelType - let data; - if (modelType === "llm") { - ({ data } = await this.listLlmModels()); - } else if (modelType === "image") { - ({ data } = await this.listImageModels()); - } else if (modelType === "tts") { - ({ data } = await this.listTtsModels()); - } else { - return []; - } - - // Handle TTS models differently (they need route information) - if (modelType === "tts") { - const uniqueModels = new Map(); - data.forEach(({ id, name, route }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, { name, route }); - } - }); - return Array.from(uniqueModels.entries()) - .map(([id, { name, route }]) => ({ - label: name, - value: JSON.stringify({ id, route }), - })) - .sort((a, b) => a.label.localeCompare(b.label)); - } else { - // Handle LLM and image models (simple id/name mapping) - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); - } + description: "The ID of the tts model to use for completions.", + async options() { + const { data } = await this.listTtsModels(); + const uniqueModels = new Map(); + 
data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); }, }, maxTokens: { From 8ad84018583e84129d1534194c9d68d621c25f95 Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Sun, 25 May 2025 18:43:09 +0100 Subject: [PATCH 06/10] correct range --- components/apipie_ai/apipie_ai.app.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index 64cd0c91d29fb..e38dad7c794d9 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -121,7 +121,7 @@ export default { type: "number", label: "Repetition Penalty", description: "Repetition penalty. **(range: (0, 2])**.", - min: -2.0, + min: 0, max: 2.0, optional: true, }, From b38a10c4c0dcad03f3586ca628417c2cb6fc0b08 Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Sun, 25 May 2025 19:25:42 +0100 Subject: [PATCH 07/10] Added error handling --- .../convert-text-to-speech.mjs | 71 ++++++++------ .../actions/create-image/create-image.mjs | 45 +++++---- .../retrieve-available-image-models.mjs | 16 ++-- .../retrieve-available-llm-models.mjs | 16 ++-- .../retrieve-available-tts-models.mjs | 16 ++-- .../retrieve-available-tts-voices.mjs | 16 ++-- .../send-chat-completion-request.mjs | 61 ++++++------ components/apipie_ai/apipie_ai.app.mjs | 94 +++++++++++-------- 8 files changed, 198 insertions(+), 137 deletions(-) diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs index 67b92ec784119..144fd8422e0e6 100644 --- a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs +++ 
b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs @@ -41,8 +41,9 @@ export default { }, }, async additionalProps() { - const props = {}; - if (this.model) { + try { + const props = {}; + if (this.model) { // Parse the model JSON to get id and route const modelData = JSON.parse(this.model); const { route } = modelData; @@ -69,36 +70,52 @@ export default { })) .sort((a, b) => a.label.localeCompare(b.label)), }; + } + return props; + } catch (e) { + $.export("Error fetching voices", e); + throw new ConfigurationError(e.message || "Failed to fetch voices"); } - return props; }, async run({ $ }) { // Parse the model JSON to get the actual model id for the API call - const modelData = JSON.parse(this.model); - const { id: modelId } = modelData; - - const response = await this.apipieAi.createSpeech({ - $, - data: { - model: modelId, - input: this.input, - voice: this.voice, - response_format: this.responseFormat, - speed: this.speed, - }, - responseType: "arraybuffer", - }); - - const outputFilePath = this.outputFile.includes("tmp/") - ? this.outputFile - : `/tmp/${this.outputFile}`; + try { + const modelData = JSON.parse(this.model); + const { id: modelId } = modelData; + const response = await this.apipieAi.createSpeech({ + $, + data: { + model: modelId, + input: this.input, + voice: this.voice, + response_format: this.responseFormat, + speed: this.speed, + }, + responseType: "arraybuffer", + }); - await fs.promises.writeFile(outputFilePath, Buffer.from(response)); + if (response.error) { + $.export("Error creating audio", response.error); + throw new ConfigurationError(e.message || "Failed to create audio"); + } + const outputFilePath = this.outputFile.includes("tmp/") + ? 
this.outputFile + : `/tmp/${this.outputFile}`; - $.export("$summary", "Generated audio successfully"); - return { - outputFilePath, - response, - }; + try { + await fs.promises.writeFile(outputFilePath, Buffer.from(response)); + } catch (e) { + $.export("Error saving audio file", e); + throw new ConfigurationError(e.message || "Failed to save audio file"); + } + $.export("$summary", "Generated audio successfully"); + return { + outputFilePath, + response, + }; + } catch (e) { + $.export("Error creating audio", e); + throw new ConfigurationError(e.message || "Failed to create audio"); + } }, }; diff --git a/components/apipie_ai/actions/create-image/create-image.mjs b/components/apipie_ai/actions/create-image/create-image.mjs index c72fde39b6986..31f2cbcb5c664 100644 --- a/components/apipie_ai/actions/create-image/create-image.mjs +++ b/components/apipie_ai/actions/create-image/create-image.mjs @@ -52,25 +52,32 @@ export default { }, }, async run({ $ }) { - const response = await this.apipieAi.createImage({ - $, - data: { - prompt: this.prompt, - n: this.n, - size: this.size, - ...(this.responseFormat && { response_format: this.responseFormat }), - model: this.model, - quality: this.quality, - style: this.style, - }, - }); - - if (response.data.length) { - $.export("$summary", `Successfully created ${response.data.length} image${response.data.length === 1 - ? "" - : "s"}`); + try { + const response = await this.apipieAi.createImage({ + $, + data: { + prompt: this.prompt, + n: this.n, + size: this.size, + ...(this.responseFormat && { response_format: this.responseFormat }), + model: this.model, + quality: this.quality, + style: this.style, + }, + }); + if (response.error) { + $.export("Error creating Image", response.error); + throw new ConfigurationError(e.message || "Failed to create Image"); + } + if (response.data.length) { + $.export("$summary", `Successfully created ${response.data.length} image${response.data.length === 1 + ? 
"" + : "s"}`); + } + return response; + } catch (e) { + $.export("Error creating Image", e); + throw new ConfigurationError(e.message || "Failed to create Image"); } - - return response; }, }; diff --git a/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs index 1c697213b4bbc..714a9e1c2c228 100644 --- a/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs +++ b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs @@ -10,11 +10,15 @@ export default { apipieAi, }, async run({ $ }) { - const response = await this.apipieAi.listImageModels({ - $, - }); - - $.export("$summary", `Successfully retrieved ${response.data.length} available Image model(s)!`); - return response; + try { + const response = await this.apipieAi.listImageModels({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available Image model(s)!`); + return response; + } catch (e) { + $.export("Error fetching Image Models", e); + throw new ConfigurationError(e.message || "Failed to fetch Image Models"); + } }, }; diff --git a/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs index 52f6a572330c0..67246d60fbb68 100644 --- a/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs +++ b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs @@ -10,11 +10,15 @@ export default { apipieAi, }, async run({ $ }) { - const response = await this.apipieAi.listLlmModels({ - $, - }); - - $.export("$summary", `Successfully retrieved ${response.data.length} available LLM model(s)!`); - return response; + try { + const response = await 
this.apipieAi.listLlmModels({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available LLM model(s)!`); + return response; + } catch (e) { + $.export("Error fetching LLM Models", e); + throw new ConfigurationError(e.message || "Failed to fetch LLM Models"); + } }, }; diff --git a/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs index 721f1e46e5685..64915217e791b 100644 --- a/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs +++ b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs @@ -10,11 +10,15 @@ export default { apipieAi, }, async run({ $ }) { - const response = await this.apipieAi.listTtsModels({ - $, - }); - - $.export("$summary", `Successfully retrieved ${response.data.length} available TTS model(s)!`); - return response; + try { + const response = await this.apipieAi.listTtsModels({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available TTS model(s)!`); + return response; + } catch (e) { + $.export("Error fetching TTS Models", e); + throw new ConfigurationError(e.message || "Failed to fetch TTS models"); + } }, }; diff --git a/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs index 44047a6138f01..9c87590313d4a 100644 --- a/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs +++ b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs @@ -10,11 +10,15 @@ export default { apipieAi, }, async run({ $ }) { - const response = await this.apipieAi.listVoices({ - $, - }); - - $.export("$summary", `Successfully retrieved ${response.data.length} available TTS 
Voices!`); - return response; + try { + const response = await this.apipieAi.listVoices({ + $, + }); + $.export("$summary", `Successfully retrieved ${response.data.length} available TTS Voices!`); + return response; + } catch (e) { + $.export("Error fetching Voices", e); + throw new ConfigurationError(e.message || "Failed to fetch Voices"); + } }, }; diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs index e24f290131a69..5cad31c75f56a 100644 --- a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -149,38 +149,43 @@ export default { }, }, async run({ $ }) { - const data = { - model: this.model, - messages: parseObject(this.messages), - stream: false, - }; + try { + const data = { + model: this.model, + messages: parseObject(this.messages), + stream: false, + }; - // Add optional parameters only if they exist - if (this.maxTokens) data.max_tokens = this.maxTokens; - if (this.temperature) data.temperature = this.temperature; - if (this.seed) data.seed = this.seed; - if (this.topP) data.top_p = this.topP; - if (this.topK) data.top_k = this.topK; - if (this.frequencyPenalty) data.frequency_penalty = this.frequencyPenalty; - if (this.presencePenalty) data.presence_penalty = this.presencePenalty; - if (this.repetitionPenalty) data.repetition_penalty = this.repetitionPenalty; - if (this.reasoningEffort) data.reasoning_effort = this.reasoningEffort; + // Add optional parameters only if they exist + if (this.maxTokens) data.max_tokens = this.maxTokens; + if (this.temperature) data.temperature = this.temperature; + if (this.seed) data.seed = this.seed; + if (this.topP) data.top_p = this.topP; + if (this.topK) data.top_k = this.topK; + if (this.frequencyPenalty) data.frequency_penalty = 
this.frequencyPenalty; + if (this.presencePenalty) data.presence_penalty = this.presencePenalty; + if (this.repetitionPenalty) data.repetition_penalty = this.repetitionPenalty; + if (this.reasoningEffort) data.reasoning_effort = this.reasoningEffort; - // Add tools if they exist - const tools = this._buildTools(); - if (tools) data.tools = tools; + // Add tools if they exist + const tools = this._buildTools(); + if (tools) data.tools = tools; - const response = await this.apipieAi.sendChatCompletionRequest({ - $, - data, - timeout: 1000 * 60 * 5, - }); + const response = await this.apipieAi.sendChatCompletionRequest({ + $, + data, + timeout: 1000 * 60 * 5, + }); + if (response.error) { + $.export("Error creating Chat Completion", response.error); + throw new ConfigurationError(e.message || "Failed to create Chat Completion"); + } - if (response.error) { - throw new ConfigurationError(response.error.message); + $.export("$summary", `A new chat completion request with Id: ${response.id} was successfully created!`); + return response; + } catch (e) { + $.export("Error creating Chat Completion", e); + throw new ConfigurationError(e); } - - $.export("$summary", `A new chat completion request with Id: ${response.id} was successfully created!`); - return response; }, }; diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index e38dad7c794d9..17922b6cae21a 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -10,19 +10,25 @@ export default { label: "Completions Model", description: "The ID of the LLM model to use for completions.", async options() { - const { data } = await this.listLlmModels(); - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); + let 
data; + try { + data = await this.listLlmModels(); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); + } catch (e) { + $.export("Error fetching Image Models", e); + throw new ConfigurationError(e.message || "Failed to fetch LLM models"); + } }, }, imageModelId: { @@ -30,19 +36,24 @@ export default { label: "Model", description: "The ID of the image model to use for completions.", async options() { - const { data } = await this.listImageModels(); - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); + try { + const { data } = await this.listImageModels(); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); + } catch (e) { + $.export("Error fetching Image Models", e); + throw new ConfigurationError(e.message || "Failed to fetch LLM models"); + } }, }, ttsModelId: { @@ -50,19 +61,24 @@ export default { label: "Model", description: "The ID of the tts model to use for completions.", async options() { - const { data } = await this.listTtsModels(); - const uniqueModels = new Map(); - data.forEach(({ id, name }) => { - if (!uniqueModels.has(id)) { - uniqueModels.set(id, name); - } - }); - return Array.from(uniqueModels.entries()) - .map(([value, label]) => ({ - label, - value, - })) - .sort((a, b) => a.label.localeCompare(b.label)); + try { + const { data } = await 
this.listTtsModels(); + const uniqueModels = new Map(); + data.forEach(({ id, name }) => { + if (!uniqueModels.has(id)) { + uniqueModels.set(id, name); + } + }); + return Array.from(uniqueModels.entries()) + .map(([value, label]) => ({ + label, + value, + })) + .sort((a, b) => a.label.localeCompare(b.label)); + } catch (e) { + $.export("Error fetching TTS Models", e); + throw new ConfigurationError(e.message || "Failed to fetch TTS models"); + } }, }, maxTokens: { From 0b7444c8c09b1a808e7f16abe92c97cabc0325df Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Sun, 25 May 2025 19:38:57 +0100 Subject: [PATCH 08/10] fixed error importing --- .../convert-text-to-speech/convert-text-to-speech.mjs | 3 ++- .../apipie_ai/actions/create-image/create-image.mjs | 3 ++- .../retrieve-available-image-models.mjs | 1 + .../retrieve-available-llm-models.mjs | 1 + .../retrieve-available-tts-models.mjs | 1 + .../retrieve-available-tts-voices.mjs | 1 + .../send-chat-completion-request.mjs | 2 +- components/apipie_ai/apipie_ai.app.mjs | 8 ++++---- 8 files changed, 13 insertions(+), 7 deletions(-) diff --git a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs index 144fd8422e0e6..7531471102261 100644 --- a/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs +++ b/components/apipie_ai/actions/convert-text-to-speech/convert-text-to-speech.mjs @@ -1,5 +1,6 @@ import fs from "fs"; import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; export default { key: "apipie_ai-convert-text-to-speech", @@ -96,7 +97,7 @@ export default { if (response.error) { $.export("Error creating audio", response.error); - throw new ConfigurationError(e.message || "Failed to create audio"); + throw new ConfigurationError(response.error.message || "Failed to create audio"); } const 
outputFilePath = this.outputFile.includes("tmp/") ? this.outputFile diff --git a/components/apipie_ai/actions/create-image/create-image.mjs b/components/apipie_ai/actions/create-image/create-image.mjs index 31f2cbcb5c664..8bdd51ec63cf1 100644 --- a/components/apipie_ai/actions/create-image/create-image.mjs +++ b/components/apipie_ai/actions/create-image/create-image.mjs @@ -1,4 +1,5 @@ import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; export default { name: "Create Image", @@ -67,7 +68,7 @@ export default { }); if (response.error) { $.export("Error creating Image", response.error); - throw new ConfigurationError(e.message || "Failed to create Image"); + throw new ConfigurationError(response.error.message || "Failed to create Image"); } if (response.data.length) { $.export("$summary", `Successfully created ${response.data.length} image${response.data.length === 1 diff --git a/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs index 714a9e1c2c228..386f4a0380856 100644 --- a/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs +++ b/components/apipie_ai/actions/retrieve-available-image-models/retrieve-available-image-models.mjs @@ -1,4 +1,5 @@ import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; export default { key: "apipie_ai-retrieve-available-image-models", diff --git a/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs index 67246d60fbb68..830c67303427c 100644 --- a/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs +++ 
b/components/apipie_ai/actions/retrieve-available-llm-models/retrieve-available-llm-models.mjs @@ -1,4 +1,5 @@ import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; export default { key: "apipie_ai-retrieve-available-llm-models", diff --git a/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs index 64915217e791b..afbb8fc7d4aa1 100644 --- a/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs +++ b/components/apipie_ai/actions/retrieve-available-tts-models/retrieve-available-tts-models.mjs @@ -1,4 +1,5 @@ import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; export default { key: "apipie_ai-retrieve-available-tts-models", diff --git a/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs index 9c87590313d4a..b74378ca8ea7d 100644 --- a/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs +++ b/components/apipie_ai/actions/retrieve-available-tts-voices/retrieve-available-tts-voices.mjs @@ -1,4 +1,5 @@ import apipieAi from "../../apipie_ai.app.mjs"; +import { ConfigurationError } from "@pipedream/platform"; export default { key: "apipie_ai-retrieve-available-tts-voices", diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs index 5cad31c75f56a..1bff88b297246 100644 --- a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -178,7 +178,7 @@ 
export default { }); if (response.error) { $.export("Error creating Chat Completion", response.error); - throw new ConfigurationError(e.message || "Failed to create Chat Completion"); + throw new ConfigurationError(response.error.message || "Failed to create Chat Completion"); } $.export("$summary", `A new chat completion request with Id: ${response.id} was successfully created!`); diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index 17922b6cae21a..87fe62ac3e22c 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -1,4 +1,5 @@ import { axios } from "@pipedream/platform"; +import { ConfigurationError } from "@pipedream/platform"; import constants from "./common/constants.mjs"; export default { @@ -10,9 +11,8 @@ export default { label: "Completions Model", description: "The ID of the LLM model to use for completions.", async options() { - let data; try { - data = await this.listLlmModels(); + const { data } = await this.listLlmModels(); const uniqueModels = new Map(); data.forEach(({ id, name }) => { if (!uniqueModels.has(id)) { @@ -26,7 +26,7 @@ export default { })) .sort((a, b) => a.label.localeCompare(b.label)); } catch (e) { - $.export("Error fetching Image Models", e); + $.export("Error fetching LLM Models", e); throw new ConfigurationError(e.message || "Failed to fetch LLM models"); } }, @@ -52,7 +52,7 @@ export default { .sort((a, b) => a.label.localeCompare(b.label)); } catch (e) { $.export("Error fetching Image Models", e); - throw new ConfigurationError(e.message || "Failed to fetch LLM models"); + throw new ConfigurationError(e.message || "Failed to fetch Image models"); } }, }, From eb985350b553ff2de1e806a64be1f6d512a93f02 Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Sun, 25 May 2025 19:52:16 +0100 Subject: [PATCH 09/10] removed tools output formatting --- components/apipie_ai/apipie_ai.app.mjs | 5
deletions(-) diff --git a/components/apipie_ai/apipie_ai.app.mjs b/components/apipie_ai/apipie_ai.app.mjs index 87fe62ac3e22c..26f0fdeab9c8b 100644 --- a/components/apipie_ai/apipie_ai.app.mjs +++ b/components/apipie_ai/apipie_ai.app.mjs @@ -169,11 +169,6 @@ export default { max: 4.0, optional: true, }, - toolOutputs: { - type: "string[]", - label: "Tool Outputs", - description: "The outputs from the tool calls. Each object in the array should contain properties `tool_call_id` and `output`.", - }, prompt: { label: "Prompt", description: "A text description of the desired image(s).", From c0590f8aabf5cb5c784a08175305e7c5dc9954e2 Mon Sep 17 00:00:00 2001 From: Toocky <115723035+Toocky@users.noreply.github.com> Date: Tue, 27 May 2025 01:03:19 +0100 Subject: [PATCH 10/10] adjust chat completions error handling --- .../send-chat-completion-request.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs index 1bff88b297246..595b98e9d55c2 100644 --- a/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs +++ b/components/apipie_ai/actions/send-chat-completion-request/send-chat-completion-request.mjs @@ -185,7 +185,7 @@ export default { return response; } catch (e) { $.export("Error creating Chat Completion", e); - throw new ConfigurationError(e); + throw new ConfigurationError(e.message || "Failed to create Chat Completion"); } }, };