diff --git a/components/x_ai/actions/create-embeddings/create-embeddings.mjs b/components/x_ai/actions/create-embeddings/create-embeddings.mjs
new file mode 100644
index 0000000000000..0090c3e80c394
--- /dev/null
+++ b/components/x_ai/actions/create-embeddings/create-embeddings.mjs
@@ -0,0 +1,57 @@
+import app from "../../x_ai.app.mjs";
+
+export default {
+  key: "x_ai-create-embeddings",
+  name: "Create Embedding",
+  description: "Create an embedding vector representation corresponding to the input text. [See the documentation](https://docs.x.ai/api/endpoints#create-embeddings)",
+  version: "0.0.2",
+  type: "action",
+  props: {
+    app,
+    embeddingModel: {
+      propDefinition: [
+        app,
+        "embeddingModel",
+      ],
+    },
+    input: {
+      propDefinition: [
+        app,
+        "input",
+      ],
+    },
+    dimensions: {
+      propDefinition: [
+        app,
+        "dimensions",
+      ],
+    },
+    encodingFormat: {
+      propDefinition: [
+        app,
+        "encodingFormat",
+      ],
+    },
+    user: {
+      propDefinition: [
+        app,
+        "user",
+      ],
+    },
+  },
+
+  async run({ $ }) {
+    const response = await this.app.createEmbeddings({
+      $,
+      data: {
+        dimensions: this.dimensions,
+        encoding_format: this.encodingFormat,
+        input: this.input,
+        model: this.embeddingModel,
+        user: this.user,
+      },
+    });
+    $.export("$summary", "Successfully created embedding");
+    return response;
+  },
+};
diff --git a/components/x_ai/actions/get-model/get-model.mjs b/components/x_ai/actions/get-model/get-model.mjs
index 528bac3a56062..10655a84d0189 100644
--- a/components/x_ai/actions/get-model/get-model.mjs
+++ b/components/x_ai/actions/get-model/get-model.mjs
@@ -4,7 +4,7 @@ export default {
   key: "x_ai-get-model",
   name: "Get Model",
   description: "List all language and embedding models available. [See the documentation](https://docs.x.ai/api/endpoints#get-model)",
-  version: "0.0.1",
+  version: "0.0.2",
   type: "action",
   props: {
     app,
@@ -21,9 +21,7 @@ export default {
       $,
       model: this.model,
     });
-
     $.export("$summary", `Successfully retrieved the '${this.model}' model`);
-
     return response;
   },
 };
diff --git a/components/x_ai/actions/post-chat-completion/post-chat-completion.mjs b/components/x_ai/actions/post-chat-completion/post-chat-completion.mjs
index 2132827b18a58..a9ed57210752d 100644
--- a/components/x_ai/actions/post-chat-completion/post-chat-completion.mjs
+++ b/components/x_ai/actions/post-chat-completion/post-chat-completion.mjs
@@ -4,7 +4,7 @@ export default {
   key: "x_ai-post-chat-completion",
   name: "Post Chat Completion",
   description: "Create a language model response for a chat conversation. [See the documentation](https://docs.x.ai/api/endpoints#chat-completions)",
-  version: "0.0.1",
+  version: "0.0.2",
   type: "action",
   props: {
     app,
@@ -20,6 +20,66 @@ export default {
         "message",
       ],
     },
+    frequencyPenalty: {
+      propDefinition: [
+        app,
+        "frequencyPenalty",
+      ],
+    },
+    logprobs: {
+      propDefinition: [
+        app,
+        "logprobs",
+      ],
+    },
+    maxTokens: {
+      propDefinition: [
+        app,
+        "maxTokens",
+      ],
+    },
+    n: {
+      propDefinition: [
+        app,
+        "n",
+      ],
+    },
+    presencePenalty: {
+      propDefinition: [
+        app,
+        "presencePenalty",
+      ],
+    },
+    seed: {
+      propDefinition: [
+        app,
+        "seed",
+      ],
+    },
+    stream: {
+      propDefinition: [
+        app,
+        "stream",
+      ],
+    },
+    temperature: {
+      propDefinition: [
+        app,
+        "temperature",
+      ],
+    },
+    topP: {
+      propDefinition: [
+        app,
+        "topP",
+      ],
+    },
+    user: {
+      propDefinition: [
+        app,
+        "user",
+      ],
+    },
   },
 
   async run({ $ }) {
@@ -28,16 +88,24 @@ export default {
       data: {
         model: this.model,
         messages: [
-        {
-          role: "user",
-          content: this.message,
+          {
+            role: "user",
+            content: this.message,
           },
         ],
+        frequency_penalty: Number(this.frequencyPenalty),
+        logprobs: this.logprobs,
+        max_tokens: this.maxTokens,
+        n: this.n,
+        presence_penalty: Number(this.presencePenalty),
+        seed: this.seed,
+        stream: this.stream,
+        temperature: Number(this.temperature),
+        top_p: Number(this.topP),
+        user: this.user,
       },
     });
-
     $.export("$summary", `Successfully sent message to the model '${this.model}'`);
-
     return response;
   },
 };
diff --git a/components/x_ai/actions/post-completion/post-completion.mjs b/components/x_ai/actions/post-completion/post-completion.mjs
index 2fd758abc85c9..e6ae9fdda99b2 100644
--- a/components/x_ai/actions/post-completion/post-completion.mjs
+++ b/components/x_ai/actions/post-completion/post-completion.mjs
@@ -4,7 +4,7 @@ export default {
   key: "x_ai-post-completion",
   name: "Post Completion",
   description: "Create a language model response for a given prompt. [See the documentation](https://docs.x.ai/api/endpoints#completions)",
-  version: "0.0.1",
+  version: "0.0.2",
   type: "action",
   props: {
     app,
@@ -20,6 +20,78 @@ export default {
         "prompt",
       ],
     },
+    echo: {
+      propDefinition: [
+        app,
+        "echo",
+      ],
+    },
+    frequencyPenalty: {
+      propDefinition: [
+        app,
+        "frequencyPenalty",
+      ],
+    },
+    logprobs: {
+      propDefinition: [
+        app,
+        "logprobs",
+      ],
+    },
+    maxTokens: {
+      propDefinition: [
+        app,
+        "maxTokens",
+      ],
+    },
+    n: {
+      propDefinition: [
+        app,
+        "n",
+      ],
+    },
+    presencePenalty: {
+      propDefinition: [
+        app,
+        "presencePenalty",
+      ],
+    },
+    seed: {
+      propDefinition: [
+        app,
+        "seed",
+      ],
+    },
+    stream: {
+      propDefinition: [
+        app,
+        "stream",
+      ],
+    },
+    suffix: {
+      propDefinition: [
+        app,
+        "suffix",
+      ],
+    },
+    temperature: {
+      propDefinition: [
+        app,
+        "temperature",
+      ],
+    },
+    topP: {
+      propDefinition: [
+        app,
+        "topP",
+      ],
+    },
+    user: {
+      propDefinition: [
+        app,
+        "user",
+      ],
+    },
   },
 
   async run({ $ }) {
@@ -28,11 +100,21 @@ export default {
       data: {
         model: this.model,
         prompt: this.prompt,
+        echo: this.echo,
+        frequency_penalty: Number(this.frequencyPenalty),
+        logprobs: this.logprobs,
+        max_tokens: this.maxTokens,
+        n: this.n,
+        presence_penalty: Number(this.presencePenalty),
+        seed: this.seed,
+        stream: this.stream,
+        suffix: this.suffix,
+        temperature: Number(this.temperature),
+        top_p: Number(this.topP),
+        user: this.user,
       },
     });
-
     $.export("$summary", `Successfully sent prompt to the model '${this.model}'`);
-
     return response;
   },
 };
diff --git a/components/x_ai/common/constants.mjs b/components/x_ai/common/constants.mjs
new file mode 100644
index 0000000000000..ecde3a72419cb
--- /dev/null
+++ b/components/x_ai/common/constants.mjs
@@ -0,0 +1,6 @@
+export default {
+  ENCODING_FORMATS: [
+    "float",
+    "base64",
+  ],
+};
diff --git a/components/x_ai/package.json b/components/x_ai/package.json
index 902290c662976..f5bc02777b51e 100644
--- a/components/x_ai/package.json
+++ b/components/x_ai/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@pipedream/x_ai",
-  "version": "0.1.0",
-  "description": "Pipedream X AI Components",
+  "version": "0.1.1",
+  "description": "Pipedream xAI Components",
   "main": "x_ai.app.mjs",
   "keywords": [
     "pipedream",
diff --git a/components/x_ai/x_ai.app.mjs b/components/x_ai/x_ai.app.mjs
index 328c89ebe813c..56a75ffe909b4 100644
--- a/components/x_ai/x_ai.app.mjs
+++ b/components/x_ai/x_ai.app.mjs
@@ -1,4 +1,5 @@
 import { axios } from "@pipedream/platform";
+import constants from "./common/constants.mjs";
 
 export default {
   type: "app",
@@ -7,15 +8,27 @@ export default {
     model: {
       type: "string",
       label: "Model",
-      description: "Specifies the model to be used for the request",
+      description: "ID of the model to use",
       async options() {
-        const response = await this.listModels();
+        const response = await this.getModels();
         const modelsIds = response.data;
         return modelsIds.map(({ id }) => ({
           value: id,
         }));
       },
     },
+    embeddingModel: {
+      type: "string",
+      label: "Embedding Model",
+      description: "ID of the embedding model to use",
+      async options() {
+        const response = await this.getEmbeddingModels();
+        const embeddingModelsIds = response.models;
+        return embeddingModelsIds.map(({ id }) => ({
+          value: id,
+        }));
+      },
+    },
     prompt: {
       type: "string",
       label: "Prompt",
@@ -26,6 +39,96 @@ export default {
       label: "Message",
       description: "Message for the chat completion",
     },
+    echo: {
+      type: "boolean",
+      label: "Echo",
+      description: "Option to include the original prompt in the response along with the generated completion",
completion", + optional: true, + }, + frequencyPenalty: { + type: "string", + label: "Frequency Penalty", + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim", + optional: true, + }, + logprobs: { + type: "boolean", + label: "Log Probabilities", + description: "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens", + optional: true, + }, + maxTokens: { + type: "integer", + label: "Max Tokens", + description: "Limits the number of tokens that can be produced in the output", + optional: true, + }, + n: { + type: "integer", + label: "Completion Number", + description: "Determines how many completion sequences to produce for each prompt. Be cautious with its use due to high token consumption", + optional: true, + }, + presencePenalty: { + type: "string", + label: "Presence Penalty", + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics", + optional: true, + }, + seed: { + type: "integer", + label: "Seed", + description: "If specified, our system will make a best effort to sample deterministically", + optional: true, + }, + stream: { + type: "boolean", + label: "Stream", + description: "Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available", + optional: true, + }, + suffix: { + type: "string", + label: "Suffix", + description: "Optional string to append after the generated text", + optional: true, + }, + temperature: { + type: "string", + label: "Temperature", + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic", + optional: true, + }, + topP: { + type: "string", + label: "Nucleus Sampling", + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered", + optional: true, + }, + user: { + type: "string", + label: "User", + description: "A unique identifier representing your end-user, which can help xAI to monitor and detect abuse", + optional: true, + }, + dimensions: { + type: "integer", + label: "Dimensions", + description: "The number of dimensions the resulting output embeddings should have", + optional: true, + }, + encodingFormat: { + type: "string", + label: "Encoding Format", + description: "The format to return the embeddings in", + optional: true, + options: constants.ENCODING_FORMATS, + }, + input: { + type: "string[]", + label: "Input", + description: "Text input to be converted into an embedding", + }, }, methods: { _baseUrl() { @@ -61,6 +164,13 @@ export default { ...args, }); }, + async createEmbeddings(args = {}) { + return this._makeRequest({ + path: "/v1/embeddings", + method: "post", + ...args, + }); + }, async getModel({ model, ...args }) { @@ -69,11 +179,17 @@ export default { ...args, }); }, - async listModels(args = {}) { + async getModels(args = {}) { return this._makeRequest({ path: "/v1/models", ...args, }); }, + async getEmbeddingModels(args = {}) { + return this._makeRequest({ + path: "/v1/embedding-models", + ...args, + }); + }, }, };