Fix failure to save the data to the cache #224

Merged 1 commit on Apr 17, 2023
2 changes: 1 addition & 1 deletion gptcache/adapter/openai.py
@@ -39,7 +39,7 @@ def llm_handler(cls, *llm_args, **llm_kwargs):
     @staticmethod
     def update_cache_callback(llm_data, update_cache_func):
         if not isinstance(llm_data, Iterator):
-            update_cache_func(Answer(get_message_from_openai_answer(llm_data)), AnswerType.STR)
+            update_cache_func(Answer(get_message_from_openai_answer(llm_data), AnswerType.STR))
             return llm_data
         else:

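The one-line change above is the actual bug fix: AnswerType.STR had been passed as a stray second positional argument to update_cache_func instead of into the Answer constructor, which broke the cache save this PR title describes. Below is a minimal, self-contained sketch of the corrected call shape; Answer, AnswerType, and update_cache_func here are small stand-ins for the gptcache objects named in the diff, not the library's real definitions.

from dataclasses import dataclass
from enum import Enum


class AnswerType(Enum):
    STR = 0  # stand-in for gptcache's answer-type enum


@dataclass
class Answer:
    answer: str
    answer_type: AnswerType


def update_cache_func(answer: Answer) -> None:
    # The real callback persists the answer into the cache; this stub only
    # shows that it expects a single Answer carrying both text and type.
    print(f"caching ({answer.answer_type}): {answer.answer!r}")


llm_text = "the result is 4"

# Broken shape (pre-fix): Answer built without a type, and AnswerType.STR
# leaking into update_cache_func as an unexpected second argument:
#   update_cache_func(Answer(llm_text), AnswerType.STR)

# Fixed shape (this PR): the type travels inside the Answer object.
update_cache_func(Answer(llm_text, AnswerType.STR))
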
1 change: 1 addition & 0 deletions gptcache/utils/response.py
@@ -1,6 +1,7 @@
 import base64
 import requests
 
+
 def get_message_from_openai_answer(openai_resp):
     return openai_resp["choices"][0]["message"]["content"]
 
2 changes: 1 addition & 1 deletion setup.py
@@ -17,7 +17,7 @@ def parse_requirements(file_name: str) -> List[str]:
 setuptools.setup(
     name="gptcache",
     packages=find_packages(),
-    version="0.1.13",
+    version="0.1.14",
     author="SimFG",
     author_email="bang.fu@zilliz.com",
     description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
153 changes: 80 additions & 73 deletions tests/unit_tests/adapter/test_openai.py
@@ -6,7 +6,7 @@
get_image_from_openai_b64,
get_image_from_path,
get_image_from_openai_url,
get_audio_text_from_openai_answer
get_audio_text_from_openai_answer,
)
from gptcache.adapter import openai
from gptcache import cache
@@ -17,14 +17,56 @@
import base64
from urllib.request import urlopen
from io import BytesIO

try:
from PIL import Image
except ModuleNotFoundError:
from gptcache.utils.dependency_control import prompt_install

prompt_install("pillow")
from PIL import Image


def test_normal_openai():
cache.init()
question = "calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas

response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)

assert get_message_from_openai_answer(response) == expect_answer, response

response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text


def test_stream_openai():
cache.init()
@@ -106,42 +148,29 @@ def test_completion():

with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{"text": expect_answer,
"finish_reason": None,
"index": 0}
],
"choices": [{"text": expect_answer, "finish_reason": None, "index": 0}],
"created": 1677825464,
"id": "cmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "text-davinci-003",
"object": "text_completion",
}

response = openai.Completion.create(
model="text-davinci-003",
prompt=question
)
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer

response = openai.Completion.create(
model="text-davinci-003",
prompt=question
)
response = openai.Completion.create(model="text-davinci-003", prompt=question)
answer_text = get_text_from_openai_answer(response)
assert answer_text == expect_answer


def test_image_create():
cache.init(pre_embedding_func=get_prompt)
prompt1 = "test url"# bytes
test_url = "https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
test_response = {
"created": 1677825464,
"data": [
{"url": test_url}
]
}
prompt1 = "test url" # bytes
test_url = (
"https://raw.githubusercontent.com/zilliztech/GPTCache/dev/docs/GPTCache.png"
)
test_response = {"created": 1677825464, "data": [{"url": test_url}]}
prompt2 = "test base64"
img_bytes = base64.b64decode(get_image_from_openai_url(test_response))
img_file = BytesIO(img_bytes) # convert image to file-like object
@@ -155,49 +184,37 @@ def test_image_create():
with patch("openai.Image.create") as mock_create_b64:
mock_create_b64.return_value = {
"created": 1677825464,
"data": [
{'b64_json': expected_img_data}
]
}
"data": [{"b64_json": expected_img_data}],
}

response = openai.Image.create(
prompt=prompt1,
size="256x256",
response_format="b64_json"
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data

response = openai.Image.create(
prompt=prompt1,
size="256x256",
response_format="b64_json"
)
prompt=prompt1, size="256x256", response_format="b64_json"
)
img_returned = get_image_from_openai_b64(response)
assert img_returned == expected_img_data

###### Return url ######
with patch("openai.Image.create") as mock_create_url:
mock_create_url.return_value = {
"created": 1677825464,
"data": [
{'url': test_url}
]
}
"data": [{"url": test_url}],
}

response = openai.Image.create(
prompt=prompt2,
size="256x256",
response_format="url"
prompt=prompt2, size="256x256", response_format="url"
)
answer_url = response["data"][0]["url"]
assert test_url == answer_url

response = openai.Image.create(
prompt=prompt2,
size="256x256",
response_format="url"
)
prompt=prompt2, size="256x256", response_format="url"
)
img_returned = get_image_from_path(response)
assert img_returned == expected_img_data
os.remove(response["data"][0]["url"])
Expand All @@ -207,52 +224,42 @@ def test_audio_transcribe():
cache.init(pre_embedding_func=get_file_bytes)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
expect_answer = "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, " \
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)

with patch("openai.Audio.transcribe") as mock_create:
mock_create.return_value = {
"text": expect_answer
}
mock_create.return_value = {"text": expect_answer}

response = openai.Audio.transcribe(
model="whisper-1",
file=audio_file
)
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer

response = openai.Audio.transcribe(
model="whisper-1",
file=audio_file
)
response = openai.Audio.transcribe(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer


def test_audio_translate():
cache.init(pre_embedding_func=get_file_bytes,
data_manager=get_data_manager(data_path="data_map1.txt"))
cache.init(
pre_embedding_func=get_file_bytes,
data_manager=get_data_manager(data_path="data_map1.txt"),
)
url = "https://github.com/towhee-io/examples/releases/download/data/blues.00000.mp3"
audio_file = urlopen(url)
expect_answer = "One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, " \
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
expect_answer = (
"One bourbon, one scotch and one bill Hey Mr. Bartender, come here I want another drink and I want it now My baby she gone, "
"she been gone tonight I ain't seen my baby since night of her life One bourbon, one scotch and one bill"
)

with patch("openai.Audio.translate") as mock_create:
mock_create.return_value = {
"text": expect_answer
}
mock_create.return_value = {"text": expect_answer}

response = openai.Audio.translate(
model="whisper-1",
file=audio_file
)
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer

response = openai.Audio.translate(
model="whisper-1",
file=audio_file
)
response = openai.Audio.translate(model="whisper-1", file=audio_file)
answer_text = get_audio_text_from_openai_answer(response)
assert answer_text == expect_answer