/
streamlit_app.py
106 lines (84 loc) · 3.29 KB
/
streamlit_app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# Conversational Retrieval QA Chatbot, built using Langflow and Streamlit
# Author: Gary A. Stafford
# Date: 2023-07-31
# Usage: streamlit run streamlit_app.py
# Requirements: pip install streamlit streamlit_chat -Uq
import logging
import sys
from typing import Optional
import requests
import streamlit as st
from streamlit_chat import message
# Log to stdout so messages are visible when running under `streamlit run`.
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(format=log_format, stream=sys.stdout, level=logging.INFO)
# Base URL of the local Langflow API used to execute a saved flow.
BASE_API_URL = "http://localhost:7860/api/v1/process"
# Base URL for the chat avatar images served from the project repository.
BASE_AVATAR_URL = "https://raw.githubusercontent.com/garystafford/build-chatbot-with-langflow/main/static"
# ***** REPLACE THE FOLLOWING LINES OF CODE *****
# FLOW_ID identifies the exported Langflow flow to run; TWEAKS holds optional
# per-component overrides keyed by the flow's node IDs (empty = use defaults).
# Both values are specific to your own Langflow instance and must be replaced.
FLOW_ID = "cd2c3ce5-0488-4f4e-86b1-2f3c92216fc5"
TWEAKS = {
"Chroma-ONPt3": {},
"ConversationalRetrievalChain-dQ4f5": {},
"ConversationBufferMemory-NbiVC": {},
"HuggingFaceEmbeddings-xhPRo": {},
"ChatOpenAI-UERtz": {},
}
# ************************************************
def main():
    """Render the Streamlit chat UI and process one user turn.

    Replays the conversation held in ``st.session_state.messages``, then, when
    the user submits a prompt, displays it, queries the Langflow backend via
    ``generate_response``, and appends both turns to the session history.
    """
    st.set_page_config(page_title="Virtual Sommelier")
    st.markdown("##### Welcome to the Virtual Sommelier")

    if "messages" not in st.session_state:
        st.session_state.messages = []

    # NOTE: loop variable renamed from `message` — the original shadowed the
    # `message` function imported from streamlit_chat at module level.
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"], avatar=msg["avatar"]):
            st.write(msg["content"])

    if prompt := st.chat_input("I'm your virtual Sommelier, how may I help you?"):
        # Add user message to chat history
        st.session_state.messages.append(
            {
                "role": "user",
                "content": prompt,
                "avatar": f"{BASE_AVATAR_URL}/people-64px.png",
            }
        )
        # Display user message in chat message container
        with st.chat_message(
            "user",
            avatar=f"{BASE_AVATAR_URL}/people-64px.png",
        ):
            st.write(prompt)
        # Display assistant response in chat message container
        with st.chat_message(
            "assistant",
            avatar=f"{BASE_AVATAR_URL}/sommelier-64px.png",
        ):
            message_placeholder = st.empty()
            with st.spinner(text="Thinking..."):
                assistant_response = generate_response(prompt)
                message_placeholder.write(assistant_response)
        # Add assistant response to chat history
        st.session_state.messages.append(
            {
                "role": "assistant",
                "content": assistant_response,
                "avatar": f"{BASE_AVATAR_URL}/sommelier-64px.png",
            }
        )
def run_flow(
    inputs: dict,
    flow_id: str,
    tweaks: Optional[dict] = None,
    timeout: float = 60.0,
) -> dict:
    """POST *inputs* to the Langflow process endpoint and return the JSON reply.

    Args:
        inputs: Payload for the flow, e.g. ``{"question": "..."}``.
        flow_id: ID of the Langflow flow to execute.
        tweaks: Optional per-component overrides; included only when truthy.
        timeout: Seconds to wait for the HTTP response. The original call had
            no timeout and could block the UI forever on a hung backend.

    Raises:
        requests.HTTPError: If the server returns a 4xx/5xx status.
        requests.RequestException: On connection failure or timeout.
    """
    api_url = f"{BASE_API_URL}/{flow_id}"
    payload = {"inputs": inputs}
    if tweaks:
        payload["tweaks"] = tweaks
    response = requests.post(api_url, json=payload, timeout=timeout)
    # Fail loudly on HTTP errors instead of returning an error-shaped JSON body.
    response.raise_for_status()
    return response.json()
def generate_response(prompt: str) -> str:
    """Ask the Langflow backend *prompt* and return the answer text.

    Returns a canned apology string on any failure — network errors included.
    The original only guarded the dict access, so a failed HTTP call crashed
    the UI instead of showing the fallback message.
    """
    # Lazy %-style args avoid formatting work when the log level filters it out.
    logging.info("question: %s", prompt)
    inputs = {"question": prompt}
    try:
        response = run_flow(inputs, flow_id=FLOW_ID, tweaks=TWEAKS)
        answer = response["result"]["answer"]
    except Exception:
        # Broad catch is deliberate at this UI boundary: any backend problem
        # (network, bad status, unexpected payload shape) degrades gracefully.
        logging.exception("failed to obtain an answer from the flow")
        return "Sorry, there was a problem finding an answer for you."
    logging.info("answer: %s", answer)
    return answer
if __name__ == "__main__":
main()