Skip to content

Commit

Permalink
fix(openai): user key now used for llm model
Browse files Browse the repository at this point in the history
  • Loading branch information
StanGirard committed Aug 1, 2023
1 parent 33481eb commit c01433c
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 11 deletions.
3 changes: 2 additions & 1 deletion backend/core/llm/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,8 @@ def generate_answer(self, question: str) -> str:
This function should also call: _create_qa, get_chat_history and format_chat_history.
It should also update the chat_history in the DB.
"""



async def generate_stream(self, question: str) -> AsyncIterable:
"""
Generate a streaming answer to a given question using QA Chain.
Expand Down
5 changes: 3 additions & 2 deletions backend/core/llm/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def embeddings(self) -> OpenAIEmbeddings:
openai_api_key=self.openai_api_key
) # pyright: ignore reportPrivateUsage=none

def _create_llm(self, model, temperature=0, streaming=False, callbacks=None) -> BaseLLM:
    """Determine the language model to be used.

    Builds a ChatOpenAI instance configured with the caller-supplied
    settings and the user's own API key (self.openai_api_key), which is
    the point of this fix: the user key, not a global default, is used.

    :param model: Language model name to be used.
    :param temperature: Sampling temperature (0 = most deterministic).
    :param streaming: Whether token streaming is activated.
    :param callbacks: Callback handlers (e.g. for streaming tokens).
    :return: Language model instance.
    """
    return ChatOpenAI(
        temperature=temperature,
        model=model,
        streaming=streaming,
        verbose=True,
        callbacks=callbacks,
        openai_api_key=self.openai_api_key,
    )  # pyright: ignore reportPrivateUsage=none
9 changes: 2 additions & 7 deletions backend/core/llm/qa_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
from repository.chat.update_chat_history import update_chat_history
from supabase.client import Client, create_client
from vectorstore.supabase import CustomSupabaseVectorStore
from langchain.chat_models import ChatOpenAI
from repository.chat.update_message_by_id import update_message_by_id
import json

Expand Down Expand Up @@ -175,12 +174,8 @@ async def generate_stream(self, question: str) -> AsyncIterable:
callback = self.callbacks[0]
callback = AsyncIteratorCallbackHandler()
self.callbacks = [callback]
model = ChatOpenAI(
streaming=True,
verbose=True,
callbacks=[callback],
)
llm = ChatOpenAI(temperature=0)
model = self._create_llm(model=self.model, streaming=True, callbacks=self.callbacks)
llm = self._create_llm(model=self.model,temperature=self.temperature)
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(model, chain_type="stuff")
qa = ConversationalRetrievalChain(
Expand Down
2 changes: 1 addition & 1 deletion frontend/lib/config/CONSTANTS.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Project-wide constant URLs.
export const GITHUB_URL = "https://github.com/stangirard/quivr";
export const TWITTER_URL = "https://twitter.com/quivr_brain";
export const DISCORD_URL = "https://discord.gg/HUpRgp2HG8";
// Post-commit value: backend moved from port 5000 to 5050.
export const DEFAULT_BACKEND_URL = "http://localhost:5050";

0 comments on commit c01433c

Please sign in to comment.