Skip to content

Commit

Permalink
Merge pull request #585 from microsoft/wotey/Tutorfix
Browse files Browse the repository at this point in the history
Wotey/tutorfix
  • Loading branch information
georearl committed Mar 21, 2024
2 parents 2acd606 + 994fda8 commit 801b869
Showing 1 changed file with 3 additions and 25 deletions.
28 changes: 3 additions & 25 deletions app/backend/approaches/MathTutor.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@
#
#______________________________________________________________________________________


# Module-level alias for the configured Azure OpenAI chat deployment name.
# NOTE(review): `azure_openai_chatgpt_deployment` is defined outside this
# visible span — presumably read from app configuration; confirm at source.
OPENAI_DEPLOYMENT_NAME = azure_openai_chatgpt_deployment
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
from langchain.agents import initialize_agent, load_tools
Expand All @@ -55,7 +55,7 @@

# Azure-hosted chat model shared by the agent helpers below.
# The diff-mangled duplicate `deployment_name=` keyword (a syntax error) is
# collapsed to a single argument using the OPENAI_DEPLOYMENT_NAME constant.
model = AzureChatOpenAI(
    openai_api_version=OPENAI_API_VERSION,
    deployment_name=OPENAI_DEPLOYMENT_NAME)

#--------------------------------------------------------------------------------------------------------------------------------------------------
# Addition of custom tools
Expand Down Expand Up @@ -151,15 +151,6 @@ def _arun(self, radius: int):

# function to stream agent response
def process_agent_scratch_pad( question):
zero_shot_agent_math = initialize_agent(
agent="zero-shot-react-description",
tools=tools,
llm=model,
verbose=True,
max_iterations=10,
max_execution_time=120,
handle_parsing_errors=True,
return_intermediate_steps=True)
messages = []
for chunk in zero_shot_agent_math.stream({"input": question}):
if "actions" in chunk:
Expand All @@ -177,27 +168,14 @@ def process_agent_scratch_pad( question):

#Function to stream final output
#Function to stream final output
def process_agent_response(question):
    """Run the zero-shot math agent on *question* and return its final answer.

    Streams chunks from the agent and keeps the last one carrying an
    ``"output"`` key. Intermediate chunks (actions, observations) are
    skipped rather than aborting the stream — the original early
    ``return {"data": "No output"}`` fired on the first non-output chunk,
    which both prevented the final answer from being reached and returned
    a dict where every other path returns a string.

    Args:
        question: The user's math question, passed as the agent input.

    Returns:
        str: ``"Final Output: ..."`` with the agent's answer, or
        ``"No output"`` when the agent produced no output chunk.
    """
    zero_shot_agent_math = initialize_agent(
        agent="zero-shot-react-description",
        tools=tools,
        llm=model,
        verbose=True,
        max_iterations=10,
        max_execution_time=120,
        handle_parsing_errors=True,
        return_intermediate_steps=True)
    output = "No output"
    # The "output" chunk is emitted at the end of the stream; ignore
    # everything else instead of returning early.
    for chunk in zero_shot_agent_math.stream({"input": question}):
        if "output" in chunk:
            output = f'Final Output: {chunk["output"]}'
    return output


#Function to process clues
def generate_response(question):
Expand Down

0 comments on commit 801b869

Please sign in to comment.