-
Notifications
You must be signed in to change notification settings - Fork 813
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add Support for OpenAI Assistants in SAML (#755)
* fix duplicate editor instances * fix typo * improve logging - adding stack trace * refactor openai assistant * uploading data files to openai assistants api * fix list workflows endpoint * added openai assistant * saving tools and datasources in yaml * add autocompelete for openai assistant tool names * refactored code * add deprecation alert in agents page * fix metadata json errors * revert agent chat ui page * add agent icon to sidebar * Small tweaks * Minor tweaks * Small tweaks * fix openai assistant's not ending stream * fix deleting all agents * fix the build type error * Small tweak --------- Co-authored-by: Ismail Pelaseyed <homanp@gmail.com>
- Loading branch information
1 parent
18f00e5
commit 6e9df80
Showing
31 changed files
with
2,276 additions
and
425 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,55 @@ | ||
import asyncio | ||
|
||
from langchain.agents import AgentExecutor | ||
from langchain.agents.openai_assistant import OpenAIAssistantRunnable | ||
from langchain.schema.messages import AIMessage | ||
from langchain.schema.output import ChatGeneration, LLMResult | ||
|
||
from app.agents.base import AgentBase | ||
|
||
|
||
class OpenAiAssistant(AgentBase):
    async def get_agent(self):
        """Build and return an ``AgentExecutor`` backed by an OpenAI Assistant.

        The assistant id is read from ``self.agent_config.metadata["id"]``.
        Returns a ``CustomAgentExecutor`` (a local ``AgentExecutor`` subclass)
        that, when streaming is enabled, replays the final output word-by-word
        through the first callback found in ``kwargs["config"]["callbacks"]``
        after the underlying run completes.
        """
        runnable = OpenAIAssistantRunnable(
            assistant_id=self.agent_config.metadata.get("id"),
            as_agent=True,
        )
        # Captured by the closure below so the executor knows whether to
        # simulate token streaming.
        should_stream = self.enable_streaming

        class CustomAgentExecutor(AgentExecutor):
            async def ainvoke(self, *args, **kwargs):
                result = await super().ainvoke(*args, **kwargs)

                if should_stream:
                    final_text = result.get("output")
                    # TODO: find a better way to get the streaming callback.
                    # NOTE(review): assumes the streaming handler is always
                    # callbacks[0] — confirm against the caller.
                    callback = kwargs["config"]["callbacks"][0]
                    await callback.on_llm_start()

                    # Emit every word as a pseudo-token, then signal the end
                    # of the stream. on_llm_end must only fire once all token
                    # callbacks have completed, which gather() guarantees.
                    # NOTE(review): gather() runs the callbacks concurrently;
                    # ordering relies on each on_llm_new_token enqueueing
                    # synchronously before its first await — confirm.
                    await asyncio.gather(
                        *(
                            callback.on_llm_new_token(word + " ")
                            for word in final_text.split(" ")
                        )
                    )

                    await callback.on_llm_end(
                        response=LLMResult(
                            generations=[
                                [
                                    ChatGeneration(
                                        message=AIMessage(
                                            content=final_text,
                                        )
                                    )
                                ]
                            ],
                        )
                    )

                return result

        # The assistant manages its own tools server-side, so none are
        # registered on the executor.
        return CustomAgentExecutor(agent=runnable, tools=[])
Oops, something went wrong.