/
text2text_quickstart.py
106 lines (72 loc) · 2.77 KB
/
text2text_quickstart.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
#!/usr/bin/env python
# coding: utf-8
# # No Framework Quickstart
#
# In this quickstart you will create a simple text to text application and learn how to log it and get feedback.
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/truera/trulens/blob/main/trulens_eval/examples/no_framework_quickstart.ipynb)
# ## Setup
# ### Add API keys
# For this quickstart you will need OpenAI and Hugging Face API keys
import os
# Placeholder credentials — replace "..." with real keys before running.
# Both services are contacted over the network later in this script.
os.environ["OPENAI_API_KEY"] = "..."
os.environ["HUGGINGFACE_API_KEY"] = "..."
import openai
# Legacy (pre-1.0) openai SDK configuration: the key is set module-wide.
openai.api_key = os.environ["OPENAI_API_KEY"]
# ### Import from TruLens
# Imports main tools:
from trulens_eval import Feedback
from trulens_eval import Huggingface
from trulens_eval import Tru
# Tru() is the workspace handle used for logging, records, and the dashboard.
tru = Tru()
# ### Create Simple Text to Text Application
#
# This example uses a bare bones OpenAI LLM, and a non-LLM just for demonstration purposes.
def llm_standalone(prompt):
    """Send ``prompt`` to gpt-3.5-turbo and return the assistant's reply text.

    A fixed system message gives the bot an upbeat Q&A persona.
    NOTE(review): uses the legacy pre-1.0 ``openai.ChatCompletion`` API.
    """
    # Assemble the two-message conversation up front for readability.
    system_message = {
        "role": "system",
        "content":
            "You are a question and answer bot, and you answer super upbeat."
    }
    user_message = {"role": "user", "content": prompt}
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[system_message, user_message],
    )
    # The legacy API returns a dict-like object; extract the first choice's text.
    return response["choices"][0]["message"]["content"]
import hashlib
def simple_hash_callable(prompt):
    """Deterministic non-LLM demo app: hash ``prompt`` with SHAKE-256.

    Args:
        prompt: Input text; encoded as UTF-8 before hashing.

    Returns:
        str: 40-character hex digest (20 bytes of SHAKE-256 output).
    """
    # hexdigest() already returns a str, so the previous str() wrapper was redundant.
    return hashlib.shake_256(prompt.encode('utf-8')).hexdigest(20)
# ### Send your first request
prompt_input = "How good is language AI?"
# Calls OpenAI over the network; needs a valid OPENAI_API_KEY.
prompt_output = llm_standalone(prompt_input)
# Bare expressions below display their value in a notebook cell;
# as a plain script they are no-ops.
prompt_output
simple_hash_callable(prompt_input)
# ## Initialize Feedback Function(s)
# Initialize Huggingface-based feedback function collection class:
# NOTE(review): this appears to use the hosted Hugging Face inference API,
# so HUGGINGFACE_API_KEY must be valid — confirm against trulens_eval docs.
hugs = Huggingface()
# Define a sentiment feedback function using HuggingFace.
# .on_output() aims the feedback at the app's response text.
f_sentiment = Feedback(hugs.positive_sentiment).on_output()
# ## Instrument the callable for logging with TruLens
from trulens_eval import TruBasicApp
# Wrap each bare callable in a recorder so calls made inside the
# `with` blocks below are logged and scored by f_sentiment.
tru_llm_standalone_recorder = TruBasicApp(
    llm_standalone, app_id="Happy Bot", feedbacks=[f_sentiment]
)
tru_simple_hash_callable_recorder = TruBasicApp(
    simple_hash_callable, app_id="Hasher", feedbacks=[f_sentiment]
)
# `recording` is bound but unused here; the context manager alone
# triggers the logging of the call made inside it.
with tru_llm_standalone_recorder as recording:
    llm_standalone(prompt_input)
with tru_simple_hash_callable_recorder as recording:
    simple_hash_callable(prompt_input)
# ## Explore in a Dashboard
tru.run_dashboard()  # open a local streamlit app to explore
# tru.stop_dashboard() # stop if needed
# Alternatively, you can run `trulens-eval` from a command line in the same folder to start the dashboard.
# ## Or view results directly in your notebook
# get_records_and_feedback returns (records_dataframe, feedback_column_names);
# [0] keeps only the records dataframe.
tru.get_records_and_feedback(app_ids=[]
)[0]  # pass an empty list of app_ids to get all