From 3fc6c18fe520b531259ad2eed74b1ded1fff0f5c Mon Sep 17 00:00:00 2001 From: aboutphilippe Date: Tue, 4 Mar 2025 12:24:55 +0100 Subject: [PATCH] add docs context --- agent_chat/src/agents/agent.py | 27 ++++++++++++------- .../twilio/livekit-trunk-setup/.gitignore | 1 + .../livekit-trunk-setup/inbound_trunk.json | 8 ------ agent_video/src/agents/agent.py | 9 ++++++- agent_video/src/functions/context_docs.py | 25 +++++++++++++++++ agent_video/src/services.py | 2 ++ 6 files changed, 53 insertions(+), 19 deletions(-) delete mode 100644 agent_telephony/twilio/livekit-trunk-setup/inbound_trunk.json create mode 100644 agent_video/src/functions/context_docs.py diff --git a/agent_chat/src/agents/agent.py b/agent_chat/src/agents/agent.py index 6e170aa3..2cde23be 100644 --- a/agent_chat/src/agents/agent.py +++ b/agent_chat/src/agents/agent.py @@ -1,7 +1,7 @@ from datetime import timedelta from pydantic import BaseModel -from restack_ai.agent import agent, import_functions, log +from restack_ai.agent import agent, import_functions, log, AgentError with import_functions(): from src.functions.llm_chat import LlmChatInput, Message, llm_chat @@ -23,15 +23,22 @@ def __init__(self) -> None: @agent.event async def messages(self, messages_event: MessagesEvent) -> list[Message]: - log.info(f"Received messages: {messages_event.messages}") - self.messages.extend(messages_event.messages) - assistant_message = await agent.step( - function=llm_chat, - function_input=LlmChatInput(messages=self.messages), - start_to_close_timeout=timedelta(seconds=120), - ) - self.messages.append(assistant_message) - return self.messages + try: + log.info(f"Received messages: {messages_event.messages}") + self.messages.extend(messages_event.messages) + + log.info(f"Calling llm_chat with messages: {self.messages}") + assistant_message = await agent.step( + function=llm_chat, + function_input=LlmChatInput(messages=self.messages), + start_to_close_timeout=timedelta(seconds=120), + ) + + 
self.messages.append(assistant_message) + return self.messages + except Exception as e: + log.error(f"Error in messages: {e}") + raise AgentError(f"Error in messages: {e}") @agent.event async def end(self, end: EndEvent) -> EndEvent: diff --git a/agent_telephony/twilio/livekit-trunk-setup/.gitignore b/agent_telephony/twilio/livekit-trunk-setup/.gitignore index d0117281..862e67c1 100644 --- a/agent_telephony/twilio/livekit-trunk-setup/.gitignore +++ b/agent_telephony/twilio/livekit-trunk-setup/.gitignore @@ -2,3 +2,4 @@ .venv .env uv.lock +inbound_trunk.json \ No newline at end of file diff --git a/agent_telephony/twilio/livekit-trunk-setup/inbound_trunk.json b/agent_telephony/twilio/livekit-trunk-setup/inbound_trunk.json deleted file mode 100644 index 115dc1ea..00000000 --- a/agent_telephony/twilio/livekit-trunk-setup/inbound_trunk.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "trunk": { - "name": "Inbound LiveKit Trunk", - "numbers": [ - "+12096194281" - ] - } -} \ No newline at end of file diff --git a/agent_video/src/agents/agent.py b/agent_video/src/agents/agent.py index d512aaa1..67fa28fb 100644 --- a/agent_video/src/agents/agent.py +++ b/agent_video/src/agents/agent.py @@ -5,7 +5,7 @@ with import_functions(): from src.functions.llm_chat import LlmChatInput, Message, llm_chat - + from src.functions.context_docs import context_docs class MessagesEvent(BaseModel): messages: list[Message] @@ -42,4 +42,11 @@ async def end(self, end: EndEvent) -> EndEvent: @agent.run async def run(self) -> None: + docs = await agent.step(function=context_docs) + system_prompt=f""" + You are an interactive video assistant, your answers will be used in text to speech so try to keep answers short and concise so that interaction is seamless. 
+ You can answer questions about the following documentation: + {docs} + """ + self.messages.append(Message(role="system", content=system_prompt)) await agent.condition(lambda: self.end) diff --git a/agent_video/src/functions/context_docs.py b/agent_video/src/functions/context_docs.py new file mode 100644 index 00000000..47f9ca1f --- /dev/null +++ b/agent_video/src/functions/context_docs.py @@ -0,0 +1,25 @@ +from restack_ai.function import function, log +import aiohttp + + +async def fetch_content_from_url(url: str) -> str: + async with aiohttp.ClientSession() as session: + async with session.get(url) as response: + if response.status == 200: + return await response.text() + else: + log.error("Failed to fetch content", status=response.status) + raise Exception(f"Failed to fetch content: {response.status}") + + +@function.defn() +async def context_docs() -> str: + try: + docs_content = await fetch_content_from_url("https://docs.restack.io/llms-full.txt") + log.info("Fetched content from URL", content=len(docs_content)) + + return docs_content + + except Exception as e: + log.error("context_docs function failed", error=str(e)) + raise diff --git a/agent_video/src/services.py b/agent_video/src/services.py index 57746c2b..a12cc24f 100644 --- a/agent_video/src/services.py +++ b/agent_video/src/services.py @@ -10,6 +10,7 @@ from src.functions.llm_chat import llm_chat from src.functions.pipeline import pipecat_pipeline from src.workflows.room import RoomWorkflow +from src.functions.context_docs import context_docs async def main() -> None: @@ -19,6 +20,7 @@ async def main() -> None: functions=[ llm_chat, pipecat_pipeline, + context_docs, ], )