Wotey/tutorfix #585
Merged 4 commits on Mar 21, 2024
app/backend/approaches/MathTutor.py (28 changes: 3 additions & 25 deletions)
@@ -46,7 +46,7 @@
#
#______________________________________________________________________________________


OPENAI_DEPLOYMENT_NAME = azure_openai_chatgpt_deployment
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
from langchain.agents import initialize_agent, load_tools
@@ -55,7 +55,7 @@

model = AzureChatOpenAI(
openai_api_version=OPENAI_API_VERSION ,
deployment_name=azure_openai_chatgpt_deployment)
deployment_name=OPENAI_DEPLOYMENT_NAME)

#--------------------------------------------------------------------------------------------------------------------------------------------------
# Addition of custom tools
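
The first two hunks are a small naming cleanup: the PR introduces an OPENAI_DEPLOYMENT_NAME constant and points the AzureChatOpenAI client at it instead of at azure_openai_chatgpt_deployment directly, so the deployment name is referenced in one place. Below is a minimal sketch of the resulting module-level setup; the os.environ lookups are an assumption about where azure_openai_chatgpt_deployment and OPENAI_API_VERSION come from, since their definitions sit in the collapsed part of MathTutor.py.

```python
import os

from langchain.chat_models import AzureChatOpenAI

# Assumption: in MathTutor.py these values are defined earlier in the file
# (collapsed in this diff); environment variables are used here only so the
# sketch is self-contained.
azure_openai_chatgpt_deployment = os.environ["AZURE_OPENAI_CHATGPT_DEPLOYMENT"]
OPENAI_API_VERSION = os.environ["OPENAI_API_VERSION"]

# Added by this PR: a single constant naming the chat deployment.
OPENAI_DEPLOYMENT_NAME = azure_openai_chatgpt_deployment

# The client now references the constant rather than the raw variable.
model = AzureChatOpenAI(
    openai_api_version=OPENAI_API_VERSION,
    deployment_name=OPENAI_DEPLOYMENT_NAME)
```

Centralising the deployment name in one constant means a later switch of deployments only touches one line.
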
@@ -151,15 +151,6 @@ def _arun(self, radius: int):

# function to stream agent response
def process_agent_scratch_pad( question):
zero_shot_agent_math = initialize_agent(
agent="zero-shot-react-description",
tools=tools,
llm=model,
verbose=True,
max_iterations=10,
max_execution_time=120,
handle_parsing_errors=True,
return_intermediate_steps=True)
messages = []
for chunk in zero_shot_agent_math.stream({"input": question}):
if "actions" in chunk:
@@ -177,27 +168,14 @@ def process_agent_scratch_pad( question):

#Function to stream final output
def process_agent_response( question):
zero_shot_agent_math = initialize_agent(
agent="zero-shot-react-description",
tools=tools,
llm=model,
verbose=True,
max_iterations=10,
max_execution_time=120,
handle_parsing_errors=True,
return_intermediate_steps=True)
stream = zero_shot_agent_math.stream({"input": question})
output = "No output"
if stream:
for chunk in stream:
if "output" in chunk:
output = f'Final Output: {chunk["output"]}'
else:
return {"data": "No output"}
return output
# for chunk in zero_shot_agent_math.stream({"input": question}):
# if "output" in chunk:
# yield {"data": f'Final Output: {chunk["output"]}'}


#Function to process clues
def generate_response(question):
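
The larger change is the pair of deleted initialize_agent(...) blocks: process_agent_scratch_pad and process_agent_response previously each rebuilt the zero-shot agent on every call. The diff does not show where the shared executor is created after this PR, so the following is only a sketch of the deduplicated shape, reusing the exact parameters from the deleted blocks and the chunk keys ("actions", "output") that the streaming loops already check; the loop bodies beyond the visible lines are reconstructed and therefore illustrative.

```python
# Sketch only: one agent executor built once at import time, with the same
# parameters as the initialize_agent(...) blocks removed in this PR.
zero_shot_agent_math = initialize_agent(
    agent="zero-shot-react-description",
    tools=tools,
    llm=model,
    verbose=True,
    max_iterations=10,
    max_execution_time=120,
    handle_parsing_errors=True,
    return_intermediate_steps=True)


def process_agent_scratch_pad(question):
    # Stream intermediate reasoning: each chunk may carry "actions" (tool calls
    # the agent decided to make) or the final "output".
    messages = []
    for chunk in zero_shot_agent_math.stream({"input": question}):
        if "actions" in chunk:
            for action in chunk["actions"]:
                messages.append(f"Calling tool: {action.tool} with input: {action.tool_input}")
        elif "output" in chunk:
            messages.append(f'Final Output: {chunk["output"]}')
    return messages


def process_agent_response(question):
    # Stream only the final answer, mirroring the visible logic: keep a default
    # value and overwrite it when an "output" chunk arrives.
    output = "No output"
    for chunk in zero_shot_agent_math.stream({"input": question}):
        if "output" in chunk:
            output = f'Final Output: {chunk["output"]}'
    return output
```

Building the executor once avoids re-registering the tools and re-creating the agent prompt on every request, which is presumably the point of the deletions.
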