Commit
fix(sentry): some unhandled errors (#894)
* fix(answers): fixed error caused by self.qa no longer existing

* fix(crawling): fixed unhandled error when a crawl request fails
StanGirard committed Aug 8, 2023
1 parent b31924f commit 9ba7241
Showing 2 changed files with 27 additions and 9 deletions.
12 changes: 8 additions & 4 deletions backend/core/crawl/crawler.py

@@ -15,10 +15,14 @@ class CrawlWebsite(BaseModel):
     max_time: int = 60
 
     def _crawl(self, url):
-        response = requests.get(url)
-        if response.status_code == 200:
-            return response.text
-        else:
+        try:
+            response = requests.get(url)
+            if response.status_code == 200:
+                return response.text
+            else:
+                return None
+        except Exception as e:
+            print(e)
             return None
 
     def process(self):
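The crawler fix is self-contained: `_crawl` now wraps the request in a broad try/except, logs the exception, and returns `None`, so network failures no longer reach Sentry as unhandled errors. The sketch below shows the resulting behaviour in isolation; the reduced field list and the example URL are illustrative and not part of the commit.

```python
# Standalone sketch of the patched _crawl, assuming requests and pydantic are installed.
# Only the fields needed for the example are kept; the real CrawlWebsite has more.
import requests
from pydantic import BaseModel


class CrawlWebsite(BaseModel):
    url: str = ""
    max_time: int = 60

    def _crawl(self, url):
        try:
            response = requests.get(url)
            if response.status_code == 200:
                return response.text
            else:
                return None  # non-200 responses are treated as "nothing crawled"
        except Exception as e:
            # Previously this exception propagated and surfaced in Sentry;
            # now it is logged and the caller simply receives None.
            print(e)
            return None


if __name__ == "__main__":
    crawler = CrawlWebsite()
    # An unresolvable host raises a requests ConnectionError internally,
    # which _crawl now swallows and converts to None.
    content = crawler._crawl("http://nonexistent.invalid")
    print("crawl failed, got None" if content is None else content[:80])
```

Whichever caller consumes `_crawl` now only has to check for `None`, which keeps the error handling in one place.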
24 changes: 19 additions & 5 deletions backend/core/llm/qa_base.py

@@ -95,7 +95,7 @@ def _create_llm(self, model, temperature=0, streaming=False, callbacks=None) ->
 
     def _create_prompt_template(self):
 
-        system_template = """Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
+        system_template = """You can use Markdown to make your answers nice. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
         ----------------
         {context}"""

@@ -111,13 +111,27 @@ def _create_prompt_template(self):
 
     def generate_answer(self, question: str) -> ChatHistory:
         transformed_history = format_chat_history(get_chat_history(self.chat_id))
-        model_response = self.qa(
-            {
+        answering_llm = self._create_llm(model=self.model,streaming=False, callbacks=self.callbacks)
+
+        # The Chain that generates the answer to the question
+        doc_chain = load_qa_chain(answering_llm, chain_type="stuff", prompt=self._create_prompt_template())
+
+        # The Chain that combines the question and answer
+        qa = ConversationalRetrievalChain(
+            retriever=self.vector_store.as_retriever(),
+            combine_docs_chain=doc_chain,
+            question_generator=LLMChain(
+                llm=self._create_llm(model=self.model), prompt=CONDENSE_QUESTION_PROMPT
+            ),
+            verbose=True,
+        )
+
+        model_response = qa({
                 "question": question,
                 "chat_history": transformed_history,
                 "custom_personality": self.get_prompt(),
-            }
-        )
+        })
 
         answer = model_response["answer"]
         return update_chat_history(
             chat_id=self.chat_id,
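The qa_base change does two things: the system prompt now invites Markdown-formatted answers, and `generate_answer` no longer reads a pre-built `self.qa` attribute (which no longer exists) but assembles the retrieval chain on each call from an answering LLM, a "stuff" QA chain, and a question-condensing `LLMChain`. A framework-free sketch of that build-per-call pattern follows; every name in it is an illustrative stand-in, whereas the real code relies on LangChain's `load_qa_chain`, `LLMChain`, and `ConversationalRetrievalChain` exactly as shown in the diff.

```python
# Minimal sketch of the build-the-chain-per-call pattern used by generate_answer.
# All names here are illustrative stand-ins, not the LangChain objects from the diff.
from typing import Callable, Dict, List


class QABaseSketch:
    def __init__(self, llm: Callable[[str], str], retriever: Callable[[str], List[str]]):
        self.llm = llm              # stands in for self._create_llm(...)
        self.retriever = retriever  # stands in for self.vector_store.as_retriever()

    def _build_chain(self) -> Callable[[Dict], Dict]:
        # Mirrors the ConversationalRetrievalChain assembled inside generate_answer:
        # fetch context, format a prompt, ask the LLM, return an "answer" key.
        def chain(inputs: Dict) -> Dict:
            docs = self.retriever(inputs["question"])
            prompt = f"Context: {' '.join(docs)}\nQuestion: {inputs['question']}"
            return {"answer": self.llm(prompt)}

        return chain

    def generate_answer(self, question: str) -> str:
        # Previously this read self.qa, which could be missing and raised at runtime;
        # building the chain here removes that initialisation-order dependency.
        qa = self._build_chain()
        return qa({"question": question, "chat_history": []})["answer"]


if __name__ == "__main__":
    bot = QABaseSketch(
        llm=lambda prompt: f"(model answer based on: {prompt!r})",
        retriever=lambda q: [f"a document mentioning {q}"],
    )
    print(bot.generate_answer("what changed in this commit?"))
```

Rebuilding the chain on every request adds a little construction overhead, but it means the chain always reflects the current model and prompt template and cannot fail because an attribute was never initialised.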
