diff --git a/agent-apis/src/functions/llm.ts b/agent-apis/src/functions/llm.ts index cb5d483..26d290a 100644 --- a/agent-apis/src/functions/llm.ts +++ b/agent-apis/src/functions/llm.ts @@ -14,7 +14,7 @@ export type OpenAIChatInput = { export const llm = async ({ userContent, systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", }: OpenAIChatInput): Promise => { try { const openai = new OpenAI({ diff --git a/agent-rag/src/functions/llmChat.ts b/agent-rag/src/functions/llmChat.ts index 8fcb860..c0d916f 100644 --- a/agent-rag/src/functions/llmChat.ts +++ b/agent-rag/src/functions/llmChat.ts @@ -21,7 +21,7 @@ export type OpenAIChatInput = { export const llmChat = async ({ systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", messages, }: OpenAIChatInput): Promise => { try { diff --git a/agent-reactflow/apps/backend/src/functions/llmChat.ts b/agent-reactflow/apps/backend/src/functions/llmChat.ts index 0cc4666..a3c0d8a 100644 --- a/agent-reactflow/apps/backend/src/functions/llmChat.ts +++ b/agent-reactflow/apps/backend/src/functions/llmChat.ts @@ -19,7 +19,7 @@ export type OpenAIChatInput = { export const llmChat = async ({ systemContent = "", - model = "gpt-4o", + model = "gpt-4.1-mini", messages, stream = true, tools, diff --git a/agent-reactflow/apps/backend/src/functions/llmResponse.ts b/agent-reactflow/apps/backend/src/functions/llmResponse.ts index 88fedc2..350e75d 100644 --- a/agent-reactflow/apps/backend/src/functions/llmResponse.ts +++ b/agent-reactflow/apps/backend/src/functions/llmResponse.ts @@ -27,7 +27,7 @@ export const llmResponse = async ({ const chatParams: ChatCompletionCreateParamsNonStreaming = { messages: messages, - model: "gpt-4o-mini", + model: "gpt-4.1-mini", response_format: responseFormat, }; diff --git a/agent-reactflow/apps/frontend/app/api/chat/route.ts b/agent-reactflow/apps/frontend/app/api/chat/route.ts index 21ee932..2b3abb0 100644 --- a/agent-reactflow/apps/frontend/app/api/chat/route.ts +++ 
b/agent-reactflow/apps/frontend/app/api/chat/route.ts @@ -16,7 +16,7 @@ export async function POST(req: Request) { }) const result = streamText({ - model: openaiClient('gpt-4o'), + model: openaiClient('gpt-4.1-mini'), messages, tools: { updateFlow: tool({ diff --git a/agent-stream/src/functions/llmChat.ts b/agent-stream/src/functions/llmChat.ts index 47bfde2..f26090c 100644 --- a/agent-stream/src/functions/llmChat.ts +++ b/agent-stream/src/functions/llmChat.ts @@ -18,7 +18,7 @@ export type OpenAIChatInput = { export const llmChat = async ({ systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", messages, stream = true, }: OpenAIChatInput): Promise => { diff --git a/agent-telephony/twilio-livekit/agent/src/functions/llmLogic.ts b/agent-telephony/twilio-livekit/agent/src/functions/llmLogic.ts index 7e5163c..0d6a013 100644 --- a/agent-telephony/twilio-livekit/agent/src/functions/llmLogic.ts +++ b/agent-telephony/twilio-livekit/agent/src/functions/llmLogic.ts @@ -47,7 +47,7 @@ export const llmLogic = async ({ const completion = await openai.beta.chat.completions.parse({ messages, - model: "gpt-4o", + model: "gpt-4.1-mini", response_format: zodResponseFormat(LlmLogicResponse, "logic"), }); diff --git a/agent-telephony/twilio-livekit/readme.md b/agent-telephony/twilio-livekit/readme.md index 24bb031..c27451a 100644 --- a/agent-telephony/twilio-livekit/readme.md +++ b/agent-telephony/twilio-livekit/readme.md @@ -31,7 +31,7 @@ docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:723 In all subfolders, duplicate the `env.example` file and rename it to `.env`. 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter) +Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter) ## Install dependencies and start services diff --git a/agent-todo/src/functions/llmChat.ts b/agent-todo/src/functions/llmChat.ts index 58be36e..d6a1c43 100644 --- a/agent-todo/src/functions/llmChat.ts +++ b/agent-todo/src/functions/llmChat.ts @@ -26,7 +26,7 @@ export type OpenAIChatInput = { export const llmChat = async ({ systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", messages, tools, }: OpenAIChatInput): Promise => { diff --git a/agent-tool/src/functions/llmChat.ts b/agent-tool/src/functions/llmChat.ts index 58be36e..d6a1c43 100644 --- a/agent-tool/src/functions/llmChat.ts +++ b/agent-tool/src/functions/llmChat.ts @@ -26,7 +26,7 @@ export type OpenAIChatInput = { export const llmChat = async ({ systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", messages, tools, }: OpenAIChatInput): Promise => { diff --git a/agent-voice/livekit/agent/src/functions/llmChat.ts b/agent-voice/livekit/agent/src/functions/llmChat.ts index 47bfde2..f26090c 100644 --- a/agent-voice/livekit/agent/src/functions/llmChat.ts +++ b/agent-voice/livekit/agent/src/functions/llmChat.ts @@ -18,7 +18,7 @@ export type OpenAIChatInput = { export const llmChat = async ({ systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", messages, stream = true, }: OpenAIChatInput): Promise => { diff --git a/agent-voice/livekit/readme.md b/agent-voice/livekit/readme.md index 16e334c..0657e55 100644 --- a/agent-voice/livekit/readme.md +++ b/agent-voice/livekit/readme.md @@ -51,7 +51,7 @@ Your code will be running and syncing with Restack to execute agents. Duplicate the `env.example` file and rename it to `.env`. 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter) +Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter) ## Interact in realtime with the agent diff --git a/features-alpha/encryption/src/functions/openai/chat/completionsBase.ts b/features-alpha/encryption/src/functions/openai/chat/completionsBase.ts index 0889059..297dad4 100644 --- a/features-alpha/encryption/src/functions/openai/chat/completionsBase.ts +++ b/features-alpha/encryption/src/functions/openai/chat/completionsBase.ts @@ -25,7 +25,7 @@ export type OpenAIChatInput = { export const openaiChatCompletionsBase = async ({ userContent, systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", jsonSchema, price, apiKey, diff --git a/features-alpha/encryption/src/functions/openai/chat/completionsStream.ts b/features-alpha/encryption/src/functions/openai/chat/completionsStream.ts index bcd9097..43ad509 100644 --- a/features-alpha/encryption/src/functions/openai/chat/completionsStream.ts +++ b/features-alpha/encryption/src/functions/openai/chat/completionsStream.ts @@ -14,7 +14,7 @@ import { SendWorkflowEvent } from "@restackio/ai/event"; import { ChatModel } from "openai/resources/index"; export async function openaiChatCompletionsStream({ - model = "gpt-4o-mini", + model = "gpt-4.1-mini", userName, newMessage, assistantName, diff --git a/features-alpha/encryption/src/functions/openai/thread/createAssistant.ts b/features-alpha/encryption/src/functions/openai/thread/createAssistant.ts index f893399..e5e6bb3 100644 --- a/features-alpha/encryption/src/functions/openai/thread/createAssistant.ts +++ b/features-alpha/encryption/src/functions/openai/thread/createAssistant.ts @@ -8,7 +8,7 @@ export async function createAssistant({ apiKey, name, instructions, - model = "gpt-4o-mini", + model = "gpt-4.1-mini", tools = [], }: { apiKey: string; diff 
--git a/refactor-needed/posthog/readme.md b/refactor-needed/posthog/readme.md index c655861..f861cf6 100644 --- a/refactor-needed/posthog/readme.md +++ b/refactor-needed/posthog/readme.md @@ -2,7 +2,7 @@ We built this to autonomous AI to watch Posthog Session Recording and create a digest on Linear (optional) -Its using OpenAI GPT-4o-mini to analyse recordings. +It's using OpenAI GPT-4.1-mini to analyse recordings. And OpenAI O1-preview to reason and create a digest in Markdown. By default we retrieve all recodings from last 24 hours, so by scheduling the workflow to run every day we get a digest of all new recordings. diff --git a/refactor-needed/posthog/src/functions/openai/chat/completionsBase.ts b/refactor-needed/posthog/src/functions/openai/chat/completionsBase.ts index 0889059..297dad4 100644 --- a/refactor-needed/posthog/src/functions/openai/chat/completionsBase.ts +++ b/refactor-needed/posthog/src/functions/openai/chat/completionsBase.ts @@ -25,7 +25,7 @@ export type OpenAIChatInput = { export const openaiChatCompletionsBase = async ({ userContent, systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", jsonSchema, price, apiKey, diff --git a/refactor-needed/posthog/src/functions/openai/chat/completionsStream.ts b/refactor-needed/posthog/src/functions/openai/chat/completionsStream.ts index bcd9097..43ad509 100644 --- a/refactor-needed/posthog/src/functions/openai/chat/completionsStream.ts +++ b/refactor-needed/posthog/src/functions/openai/chat/completionsStream.ts @@ -14,7 +14,7 @@ import { SendWorkflowEvent } from "@restackio/ai/event"; import { ChatModel } from "openai/resources/index"; export async function openaiChatCompletionsStream({ - model = "gpt-4o-mini", + model = "gpt-4.1-mini", userName, newMessage, assistantName, diff --git a/refactor-needed/posthog/src/functions/openai/thread/createAssistant.ts b/refactor-needed/posthog/src/functions/openai/thread/createAssistant.ts index f893399..e5e6bb3 100644 ---
a/refactor-needed/posthog/src/functions/openai/thread/createAssistant.ts +++ b/refactor-needed/posthog/src/functions/openai/thread/createAssistant.ts @@ -8,7 +8,7 @@ export async function createAssistant({ apiKey, name, instructions, - model = "gpt-4o-mini", + model = "gpt-4.1-mini", tools = [], }: { apiKey: string; diff --git a/refactor-needed/posthog/src/workflows/chunk.ts b/refactor-needed/posthog/src/workflows/chunk.ts index aba3865..660b495 100644 --- a/refactor-needed/posthog/src/workflows/chunk.ts +++ b/refactor-needed/posthog/src/workflows/chunk.ts @@ -46,7 +46,7 @@ export async function chunkWorkflow({ }).openaiChatCompletionsBase({ systemContent: "You are a helpful assistant that summarizes posthog recordings. Here is the snapshot blob of it", - model: "gpt-4o-mini", + model: "gpt-4.1-mini", userContent: ` Here is a chunk of the recording blob: ${chunk} diff --git a/refactor-needed/posthog/src/workflows/recording.ts b/refactor-needed/posthog/src/workflows/recording.ts index eed1652..65ecb42 100644 --- a/refactor-needed/posthog/src/workflows/recording.ts +++ b/refactor-needed/posthog/src/workflows/recording.ts @@ -110,7 +110,7 @@ export async function recordingWorkflow({ }).openaiChatCompletionsBase({ systemContent: "You are a helpful assistant that summarizes posthog recordings.", - model: "gpt-4o-mini", + model: "gpt-4.1-mini", userContent: ` Here are summaries of each chunk of the recording blob: ${summaries} diff --git a/refactor-needed/voice/src/functions/openai/chat/completionsBase.ts b/refactor-needed/voice/src/functions/openai/chat/completionsBase.ts index 0889059..297dad4 100644 --- a/refactor-needed/voice/src/functions/openai/chat/completionsBase.ts +++ b/refactor-needed/voice/src/functions/openai/chat/completionsBase.ts @@ -25,7 +25,7 @@ export type OpenAIChatInput = { export const openaiChatCompletionsBase = async ({ userContent, systemContent = "", - model = "gpt-4o-mini", + model = "gpt-4.1-mini", jsonSchema, price, apiKey, diff --git 
a/refactor-needed/voice/src/functions/openai/chat/completionsStream.ts b/refactor-needed/voice/src/functions/openai/chat/completionsStream.ts index bcd9097..43ad509 100644 --- a/refactor-needed/voice/src/functions/openai/chat/completionsStream.ts +++ b/refactor-needed/voice/src/functions/openai/chat/completionsStream.ts @@ -14,7 +14,7 @@ import { SendWorkflowEvent } from "@restackio/ai/event"; import { ChatModel } from "openai/resources/index"; export async function openaiChatCompletionsStream({ - model = "gpt-4o-mini", + model = "gpt-4.1-mini", userName, newMessage, assistantName, diff --git a/refactor-needed/voice/src/functions/openai/thread/createAssistant.ts b/refactor-needed/voice/src/functions/openai/thread/createAssistant.ts index f893399..e5e6bb3 100644 --- a/refactor-needed/voice/src/functions/openai/thread/createAssistant.ts +++ b/refactor-needed/voice/src/functions/openai/thread/createAssistant.ts @@ -8,7 +8,7 @@ export async function createAssistant({ apiKey, name, instructions, - model = "gpt-4o-mini", + model = "gpt-4.1-mini", tools = [], }: { apiKey: string; diff --git a/refactor-needed/voice/src/workflows/conversation/conversation.ts b/refactor-needed/voice/src/workflows/conversation/conversation.ts index 56303f8..21a9ac5 100644 --- a/refactor-needed/voice/src/workflows/conversation/conversation.ts +++ b/refactor-needed/voice/src/workflows/conversation/conversation.ts @@ -35,7 +35,7 @@ export async function conversationWorkflow({ taskQueue: "erp", }).erpGetTools(); - const model: ChatModel = "gpt-4o-mini"; + const model: ChatModel = "gpt-4.1-mini"; const commonOpenaiOptions = { model,