diff --git a/agent_apis/src/functions/llm.py b/agent_apis/src/functions/llm.py
index 951d6c26..f3135424 100644
--- a/agent_apis/src/functions/llm.py
+++ b/agent_apis/src/functions/llm.py
@@ -41,7 +41,7 @@ async def llm(function_input: FunctionInputParams) -> str:
         messages.append({"role": "user", "content": function_input.user_content})
 
         response = client.chat.completions.create(
-            model=function_input.model or "gpt-4o-mini", messages=messages
+            model=function_input.model or "gpt-4.1-mini", messages=messages
         )
         log.info("llm function completed", response=response)
         return response.choices[0].message.content
diff --git a/agent_apis/src/workflows/multistep.py b/agent_apis/src/workflows/multistep.py
index a63c6be9..a7ddb6a1 100644
--- a/agent_apis/src/workflows/multistep.py
+++ b/agent_apis/src/workflows/multistep.py
@@ -35,7 +35,7 @@ async def run(self, workflow_input: WorkflowInputParams) -> dict:
             function_input=FunctionInputParams(
                 system_content=f"You are a personal assitant and have access to weather data {weather_data}. Always greet person with relevant info from weather data",
                 user_content=user_content,
-                model="gpt-4o-mini",
+                model="gpt-4.1-mini",
             ),
             start_to_close_timeout=timedelta(seconds=120),
         )
diff --git a/agent_chat/src/functions/llm_chat.py b/agent_chat/src/functions/llm_chat.py
index e808fed4..6afa8503 100644
--- a/agent_chat/src/functions/llm_chat.py
+++ b/agent_chat/src/functions/llm_chat.py
@@ -44,7 +44,7 @@ async def llm_chat(agent_input: LlmChatInput) -> dict[str, str]:
             )
 
         assistant_raw_response = client.chat.completions.create(
-            model=agent_input.model or "gpt-4o-mini",
+            model=agent_input.model or "gpt-4.1-mini",
             messages=agent_input.messages,
         )
     except Exception as e:
diff --git a/agent_rag/README.md b/agent_rag/README.md
index 12b96a68..0dedf479 100644
--- a/agent_rag/README.md
+++ b/agent_rag/README.md
@@ -51,7 +51,7 @@ python -c "from src.services import watch_services; watch_services()"
 
 Duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [console.restack.io](https://console.restack.io)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [console.restack.io](https://console.restack.io)
 
 ## Run agents
diff --git a/agent_rag/src/functions/llm_chat.py b/agent_rag/src/functions/llm_chat.py
index b60e5c36..a62f1a03 100644
--- a/agent_rag/src/functions/llm_chat.py
+++ b/agent_rag/src/functions/llm_chat.py
@@ -45,7 +45,7 @@ async def llm_chat(function_input: LlmChatInput) -> ChatCompletion:
             )
 
         response = client.chat.completions.create(
-            model=function_input.model or "gpt-4o-mini",
+            model=function_input.model or "gpt-4.1-mini",
             messages=function_input.messages,
         )
     except Exception as e:
diff --git a/agent_stream/src/functions/llm_chat.py b/agent_stream/src/functions/llm_chat.py
index 63ecae34..4bee1401 100644
--- a/agent_stream/src/functions/llm_chat.py
+++ b/agent_stream/src/functions/llm_chat.py
@@ -40,7 +40,7 @@ async def llm_chat(function_input: LlmChatInput) -> str:
     messages_dicts = [message.model_dump() for message in function_input.messages]
     # Get the streamed response from OpenAI API
     response: Stream[ChatCompletionChunk] = client.chat.completions.create(
-        model=function_input.model or "gpt-4o-mini",
+        model=function_input.model or "gpt-4.1-mini",
         messages=messages_dicts,
         stream=True,
     )
diff --git a/agent_telephony/twilio_livekit/readme.md b/agent_telephony/twilio_livekit/readme.md
index 81b12992..1e0e2120 100644
--- a/agent_telephony/twilio_livekit/readme.md
+++ b/agent_telephony/twilio_livekit/readme.md
@@ -31,7 +31,7 @@ docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:723
 
 In all subfolders, duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Start Restack Agent with Twilio
@@ -102,7 +102,7 @@ python src/worker.py dev
 
 Duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Create a new Agent
diff --git a/agent_telephony/vapi/agent_vapi/readme.md b/agent_telephony/vapi/agent_vapi/readme.md
index 8e28f540..34c58b8b 100644
--- a/agent_telephony/vapi/agent_vapi/readme.md
+++ b/agent_telephony/vapi/agent_vapi/readme.md
@@ -28,7 +28,7 @@ docker run -d --pull always --name restack -p 5233:5233 -p 6233:6233 -p 7233:723
 
 In all subfolders, duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Start Restack Agent with Twilio
@@ -99,7 +99,7 @@ python src/pipeline.py dev
 
 Duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Create a new Agent
diff --git a/agent_telephony/vapi/agent_vapi/src/functions/llm_chat.py b/agent_telephony/vapi/agent_vapi/src/functions/llm_chat.py
index ce82e2a4..48ccff3d 100644
--- a/agent_telephony/vapi/agent_vapi/src/functions/llm_chat.py
+++ b/agent_telephony/vapi/agent_vapi/src/functions/llm_chat.py
@@ -40,7 +40,7 @@ async def llm_chat(function_input: LlmChatInput) -> str:
     messages_dicts = [message.model_dump() for message in function_input.messages]
     # Get the streamed response from OpenAI API
    response: Stream[ChatCompletionChunk] = client.chat.completions.create(
-        model=function_input.model or "gpt-4o-mini",
+        model=function_input.model or "gpt-4.1-mini",
         messages=messages_dicts,
         stream=True,
     )
diff --git a/agent_todo/src/functions/llm_chat.py b/agent_todo/src/functions/llm_chat.py
index 6f5518ba..57fe1c9f 100644
--- a/agent_todo/src/functions/llm_chat.py
+++ b/agent_todo/src/functions/llm_chat.py
@@ -55,7 +55,7 @@ async def llm_chat(function_input: LlmChatInput) -> ChatCompletion:
             )
 
         response = client.chat.completions.create(
-            model=function_input.model or "gpt-4o-mini",
+            model=function_input.model or "gpt-4.1-mini",
             messages=function_input.messages,
             tools=function_input.tools,
         )
diff --git a/agent_tool/README.md b/agent_tool/README.md
index c956defb..f125a9b3 100644
--- a/agent_tool/README.md
+++ b/agent_tool/README.md
@@ -51,7 +51,7 @@ python -c "from src.services import watch_services; watch_services()"
 
 Duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Run agents
diff --git a/agent_tool/src/functions/llm_chat.py b/agent_tool/src/functions/llm_chat.py
index 4a3533b7..c2f8869a 100644
--- a/agent_tool/src/functions/llm_chat.py
+++ b/agent_tool/src/functions/llm_chat.py
@@ -55,7 +55,7 @@ async def llm_chat(function_input: LlmChatInput) -> ChatCompletion:
             )
 
         result = client.chat.completions.create(
-            model=function_input.model or "gpt-4o-mini",
+            model=function_input.model or "gpt-4.1-mini",
             messages=function_input.messages,
             tools=function_input.tools,
         )
diff --git a/agent_video/README.md b/agent_video/README.md
index 1b794c95..a78b1932 100644
--- a/agent_video/README.md
+++ b/agent_video/README.md
@@ -55,7 +55,7 @@ python -c "from src.services import watch_services; watch_services()"
 
 Duplicate the `env.example` file and rename it to `.env`.
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Create Room and run Agent in parallel
diff --git a/agent_video/src/functions/llm_chat.py b/agent_video/src/functions/llm_chat.py
index ce82e2a4..48ccff3d 100644
--- a/agent_video/src/functions/llm_chat.py
+++ b/agent_video/src/functions/llm_chat.py
@@ -40,7 +40,7 @@ async def llm_chat(function_input: LlmChatInput) -> str:
     messages_dicts = [message.model_dump() for message in function_input.messages]
     # Get the streamed response from OpenAI API
     response: Stream[ChatCompletionChunk] = client.chat.completions.create(
-        model=function_input.model or "gpt-4o-mini",
+        model=function_input.model or "gpt-4.1-mini",
         messages=messages_dicts,
         stream=True,
     )
diff --git a/agent_voice/livekit/README.md b/agent_voice/livekit/README.md
index a0a27fca..37e7463c 100644
--- a/agent_voice/livekit/README.md
+++ b/agent_voice/livekit/README.md
@@ -34,7 +34,7 @@ In all subfolders, duplicate the `env.example` file and rename it to `.env`.
 - Sign up at [ElevenLabs](https://elevenlabs.io/docs/overview)
 - Add `ELEVENLABS_API_KEY` to .env file
 
-Obtain a Restack API Key to interact with the 'gpt-4o-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
+Obtain a Restack API Key to interact with the 'gpt-4.1-mini' model at no cost from [Restack Cloud](https://console.restack.io/starter)
 
 ## Start Restack
diff --git a/agent_voice/livekit/agent/src/functions/llm_chat.py b/agent_voice/livekit/agent/src/functions/llm_chat.py
index ac5be33e..e18fce4a 100644
--- a/agent_voice/livekit/agent/src/functions/llm_chat.py
+++ b/agent_voice/livekit/agent/src/functions/llm_chat.py
@@ -40,7 +40,7 @@ async def llm_chat(function_input: LlmChatInput) -> str:
     messages_dicts = [message.model_dump() for message in function_input.messages]
     # Get the streamed response from OpenAI API
     response: Stream[ChatCompletionChunk] = client.chat.completions.create(
-        model=function_input.model or "gpt-4o-mini",
+        model=function_input.model or "gpt-4.1-mini",
         messages=messages_dicts,
         stream=True,
     )
diff --git a/agent_voice/livekit/livekit_pipeline/src/pipeline.py b/agent_voice/livekit/livekit_pipeline/src/pipeline.py
index d03bc4f7..5c7b23d3 100644
--- a/agent_voice/livekit/livekit_pipeline/src/pipeline.py
+++ b/agent_voice/livekit/livekit_pipeline/src/pipeline.py
@@ -95,7 +95,7 @@ async def entrypoint(ctx: JobContext) -> None:
         vad=ctx.proc.userdata["vad"],
         stt=deepgram.STT(),
         llm=openai.LLM(
-            # model="gpt-4o-mini",
+            # model="gpt-4.1-mini",
             # api_key=os.environ.get("OPENAI_API_KEY"),
             api_key=f"{agent_id}-livekit",
             base_url=agent_url,
diff --git a/agent_voice/pipecat/pipecat_pipeline/src/pipeline.py b/agent_voice/pipecat/pipecat_pipeline/src/pipeline.py
index a223ddf5..009e30df 100644
--- a/agent_voice/pipecat/pipecat_pipeline/src/pipeline.py
+++ b/agent_voice/pipecat/pipecat_pipeline/src/pipeline.py
@@ -51,7 +51,7 @@ async def main() -> None:
         live_options=LiveOptions(vad_events=True, utterance_end_ms="1000"),
     )
 
-    llm = OpenAILLMService(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini")
+    llm = OpenAILLMService(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4.1-mini")
 
     tts = ElevenLabsTTSService(
         api_key=os.getenv("ELEVENLABS_API_KEY", ""),
diff --git a/audio_transcript/pyproject.toml b/audio_transcript/pyproject.toml
index 5ddfa7d6..cfdbe509 100644
--- a/audio_transcript/pyproject.toml
+++ b/audio_transcript/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "audio_transcript"
 version = "0.0.1"
-description = "Transcribe audio with OpenAI Whisper and translate the text with OpenAI GPT-4o-mini"
+description = "Transcribe audio with OpenAI Whisper and translate the text with OpenAI gpt-4.1-mini"
 authors = [{ name = "Restack Team", email = "service@restack.io" }]
 requires-python = ">=3.10,<3.14"
 readme = "README.md"
diff --git a/audio_transcript/src/functions/translate_text.py b/audio_transcript/src/functions/translate_text.py
index aff93d7d..ebd88b48 100644
--- a/audio_transcript/src/functions/translate_text.py
+++ b/audio_transcript/src/functions/translate_text.py
@@ -20,7 +20,7 @@ async def translate_text(input: TranslateTextInput):
 
     try:
         response = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4.1-mini",
             messages=[
                 {
                     "role": "system",
diff --git a/community/defense_quickstart_audio_transcription_translation/src/functions/translate.py b/community/defense_quickstart_audio_transcription_translation/src/functions/translate.py
index b5bb1eef..c2782299 100644
--- a/community/defense_quickstart_audio_transcription_translation/src/functions/translate.py
+++ b/community/defense_quickstart_audio_transcription_translation/src/functions/translate.py
@@ -23,7 +23,7 @@ async def translate(input: FunctionInputParams):
         print(messages)
         messages.append({"role": "system", "content": "To each output in the end add a line 'Helped By Restack AI'"})
         response = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4.1-mini",
             messages=messages,
             temperature=0.0
         )
diff --git a/community/e2b/src/workflows/code_execution.py b/community/e2b/src/workflows/code_execution.py
index 50529491..5ce76eea 100644
--- a/community/e2b/src/workflows/code_execution.py
+++ b/community/e2b/src/workflows/code_execution.py
@@ -21,7 +21,7 @@ async def run(self, input: CodeExecutionWorkflowInput) -> CodeExecutionWorkflowO
        messages = []
        while True:
            llm_response = await workflow.step(openai_tool_call, input=OpenaiToolCallInput(
-                model="gpt-4o-mini",
+                model="gpt-4.1-mini",
                user_content=input.user_content if not messages else None,
                system_content=input.system_content if not messages else None,
                messages=messages,
diff --git a/community/livekit_opentelemetry/src/pipeline.py b/community/livekit_opentelemetry/src/pipeline.py
index 19111a7b..9238e9b5 100644
--- a/community/livekit_opentelemetry/src/pipeline.py
+++ b/community/livekit_opentelemetry/src/pipeline.py
@@ -112,7 +112,7 @@ async def entrypoint(ctx: JobContext) -> None:
         vad=ctx.proc.userdata["vad"],
         stt=deepgram.STT(),
         llm=openai.LLM(
-            # model="gpt-4o-mini",
+            # model="gpt-4.1-mini",
             # api_key=os.environ.get("OPENAI_API_KEY"),
             api_key=f"{agent_id}-livekit",
             base_url=agent_url,
diff --git a/community/openai_greet/src/functions/function.py b/community/openai_greet/src/functions/function.py
index 886a5bfc..976bb011 100644
--- a/community/openai_greet/src/functions/function.py
+++ b/community/openai_greet/src/functions/function.py
@@ -24,7 +24,7 @@ async def openai_greet(input: FunctionInputParams) -> str:
         messages.append({"role": "user", "content": input.user_content})
 
         response = client.chat.completions.create(
-            model=input.model or "gpt-4o-mini",
+            model=input.model or "gpt-4.1-mini",
             messages=messages,
             response_format={
                 "json_schema": {
diff --git a/community/re_act/src/functions/decide.py b/community/re_act/src/functions/decide.py
index 8205d588..ec32a22e 100644
--- a/community/re_act/src/functions/decide.py
+++ b/community/re_act/src/functions/decide.py
@@ -39,7 +39,7 @@ async def decide(input: DecideInput):
         ]
 
         response = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4.1-mini",
             messages=[
                 {
                     "role": "system",
diff --git a/community/re_act/src/functions/generate_email_content.py b/community/re_act/src/functions/generate_email_content.py
index c8056020..41563fe1 100644
--- a/community/re_act/src/functions/generate_email_content.py
+++ b/community/re_act/src/functions/generate_email_content.py
@@ -17,7 +17,7 @@ async def generate_email_content(input: GenerateEmailInput):
     client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
 
     response = client.chat.completions.create(
-        model="gpt-4o-mini",
+        model="gpt-4.1-mini",
         messages=[
             {
                 "role": "system",
diff --git a/pdf_ocr/src/functions/openai_chat.py b/pdf_ocr/src/functions/openai_chat.py
index 4138ae55..55bc97fd 100644
--- a/pdf_ocr/src/functions/openai_chat.py
+++ b/pdf_ocr/src/functions/openai_chat.py
@@ -28,7 +28,7 @@ async def openai_chat(input: OpenAiChatInput) -> str:
         messages.append({"role": "user", "content": input.user_content})
 
         response = client.chat.completions.create(
-            model=input.model or "gpt-4o-mini",
+            model=input.model or "gpt-4.1-mini",
             messages=messages
         )
         log.info("openai_chat function completed", response=response)
diff --git a/pdf_ocr/src/workflows/pdf.py b/pdf_ocr/src/workflows/pdf.py
index e812eedb..edaedce2 100644
--- a/pdf_ocr/src/workflows/pdf.py
+++ b/pdf_ocr/src/workflows/pdf.py
@@ -34,7 +34,7 @@ async def run(self, input: PdfWorkflowInput):
                 function=openai_chat,
                 function_input=OpenAiChatInput(
                     user_content=f"Make a summary of that PDF. Here is the OCR result: {ocr_result}",
-                    model="gpt-4o-mini"
+                    model="gpt-4.1-mini"
                 ),
                 start_to_close_timeout=timedelta(seconds=120)
             )
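
Reviewer note, not part of the patch: every hunk above makes the same one-line change, swapping the hard-coded fallback "gpt-4o-mini" for "gpt-4.1-mini" at each call site. Because the literal is repeated across roughly twenty files, the next model bump will be another patch of this size. A minimal sketch of one alternative, assuming a shared helper module; the DEFAULT_LLM_MODEL constant, the LLM_MODEL environment variable, and the chat() helper are hypothetical and exist in none of these repos:

# Hypothetical sketch (assumed names, not code from this patch): keep the
# fallback model name in one place so a future bump is a one-line change.
import os

from openai import OpenAI

# Assumption: an LLM_MODEL environment variable overrides the default.
DEFAULT_LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4.1-mini")

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


def chat(messages: list[dict], model: str | None = None) -> str:
    # Same fallback pattern as the call sites in the patch above,
    # but against the shared constant instead of a repeated literal.
    response = client.chat.completions.create(
        model=model or DEFAULT_LLM_MODEL,
        messages=messages,
    )
    return response.choices[0].message.content

Call sites would then read model=function_input.model or DEFAULT_LLM_MODEL, and the READMEs could reference the environment variable rather than a literal model name.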