Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion agent-apis/src/functions/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ export type OpenAIChatInput = {
export const llm = async ({
userContent,
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
}: OpenAIChatInput): Promise<string> => {
try {
const openai = new OpenAI({
Expand Down
2 changes: 1 addition & 1 deletion agent-rag/src/functions/llmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ export type OpenAIChatInput = {

export const llmChat = async ({
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
messages,
}: OpenAIChatInput): Promise<string> => {
try {
Expand Down
2 changes: 1 addition & 1 deletion agent-reactflow/apps/backend/src/functions/llmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ export type OpenAIChatInput = {

export const llmChat = async ({
systemContent = "",
model = "gpt-4o",
model = "gpt-4.1-mini",
messages,
stream = true,
tools,
Expand Down
2 changes: 1 addition & 1 deletion agent-reactflow/apps/backend/src/functions/llmResponse.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ export const llmResponse = async ({

const chatParams: ChatCompletionCreateParamsNonStreaming = {
messages: messages,
model: "gpt-4o-mini",
model: "gpt-4.1-mini",
response_format: responseFormat,
};

Expand Down
2 changes: 1 addition & 1 deletion agent-reactflow/apps/frontend/app/api/chat/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ export async function POST(req: Request) {
})

const result = streamText({
model: openaiClient('gpt-4o'),
model: openaiClient('gpt-4.1-mini'),
messages,
tools: {
updateFlow: tool({
Expand Down
2 changes: 1 addition & 1 deletion agent-stream/src/functions/llmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ export type OpenAIChatInput = {

export const llmChat = async ({
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
messages,
stream = true,
}: OpenAIChatInput): Promise<Message> => {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ export const llmLogic = async ({

const completion = await openai.beta.chat.completions.parse({
messages,
model: "gpt-4o",
model: "gpt-4.1-mini",
response_format: zodResponseFormat(LlmLogicResponse, "logic"),
});

Expand Down
2 changes: 1 addition & 1 deletion agent-telephony/twilio-livekit/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:723

In all subfolders, duplicate the `env.example` file and rename it to `.env`.

Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)


## Install dependencies and start services
Expand Down
2 changes: 1 addition & 1 deletion agent-todo/src/functions/llmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ export type OpenAIChatInput = {

export const llmChat = async ({
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
messages,
tools,
}: OpenAIChatInput): Promise<ChatCompletionMessage> => {
Expand Down
2 changes: 1 addition & 1 deletion agent-tool/src/functions/llmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ export type OpenAIChatInput = {

export const llmChat = async ({
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
messages,
tools,
}: OpenAIChatInput): Promise<ChatCompletionMessage> => {
Expand Down
2 changes: 1 addition & 1 deletion agent-voice/livekit/agent/src/functions/llmChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ export type OpenAIChatInput = {

export const llmChat = async ({
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
messages,
stream = true,
}: OpenAIChatInput): Promise<Message> => {
Expand Down
2 changes: 1 addition & 1 deletion agent-voice/livekit/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ Your code will be running and syncing with Restack to execute agents.

Duplicate the `env.example` file and rename it to `.env`.

Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)


## Interact in realtime with the agent
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ export type OpenAIChatInput = {
export const openaiChatCompletionsBase = async ({
userContent,
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
jsonSchema,
price,
apiKey,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import { SendWorkflowEvent } from "@restackio/ai/event";
import { ChatModel } from "openai/resources/index";

export async function openaiChatCompletionsStream({
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
userName,
newMessage,
assistantName,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ export async function createAssistant({
apiKey,
name,
instructions,
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
tools = [],
}: {
apiKey: string;
Expand Down
2 changes: 1 addition & 1 deletion refactor-needed/posthog/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

We built this autonomous AI to watch PostHog Session Recordings and create a digest on Linear (optional)

Its using OpenAI GPT-4o-mini to analyse recordings.
It uses OpenAI GPT-4.1-mini to analyse recordings,
and OpenAI o1-preview to reason and create a digest in Markdown.

By default we retrieve all recordings from the last 24 hours, so by scheduling the workflow to run every day we get a digest of all new recordings.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ export type OpenAIChatInput = {
export const openaiChatCompletionsBase = async ({
userContent,
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
jsonSchema,
price,
apiKey,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import { SendWorkflowEvent } from "@restackio/ai/event";
import { ChatModel } from "openai/resources/index";

export async function openaiChatCompletionsStream({
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
userName,
newMessage,
assistantName,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ export async function createAssistant({
apiKey,
name,
instructions,
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
tools = [],
}: {
apiKey: string;
Expand Down
2 changes: 1 addition & 1 deletion refactor-needed/posthog/src/workflows/chunk.ts
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ export async function chunkWorkflow({
}).openaiChatCompletionsBase({
systemContent:
"You are a helpful assistant that summarizes posthog recordings. Here is the snapshot blob of it",
model: "gpt-4o-mini",
model: "gpt-4.1-mini",
userContent: `
Here is a chunk of the recording blob:
${chunk}
Expand Down
2 changes: 1 addition & 1 deletion refactor-needed/posthog/src/workflows/recording.ts
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ export async function recordingWorkflow({
}).openaiChatCompletionsBase({
systemContent:
"You are a helpful assistant that summarizes posthog recordings.",
model: "gpt-4o-mini",
model: "gpt-4.1-mini",
userContent: `
Here are summaries of each chunk of the recording blob:
${summaries}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ export type OpenAIChatInput = {
export const openaiChatCompletionsBase = async ({
userContent,
systemContent = "",
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
jsonSchema,
price,
apiKey,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import { SendWorkflowEvent } from "@restackio/ai/event";
import { ChatModel } from "openai/resources/index";

export async function openaiChatCompletionsStream({
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
userName,
newMessage,
assistantName,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ export async function createAssistant({
apiKey,
name,
instructions,
model = "gpt-4o-mini",
model = "gpt-4.1-mini",
tools = [],
}: {
apiKey: string;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ export async function conversationWorkflow({
taskQueue: "erp",
}).erpGetTools();

const model: ChatModel = "gpt-4o-mini";
const model: ChatModel = "gpt-4.1-mini";

const commonOpenaiOptions = {
model,
Expand Down