Skip to content

Commit

Permalink
update openai version to v1
Browse files Browse the repository at this point in the history
  • Loading branch information
uni-zhuan committed Nov 11, 2023
1 parent de469e7 commit 6e93038
Show file tree
Hide file tree
Showing 37 changed files with 1,891 additions and 1,512 deletions.
31 changes: 21 additions & 10 deletions demo/animal_story.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
import os
import time

import openai
import requests
from openai import OpenAI
from PIL import Image
from requests.adapters import HTTPAdapter # type: ignore
from requests.packages.urllib3.util.retry import Retry # type: ignore
Expand All @@ -31,19 +31,30 @@ def prompt_generation(prompt: str, style) -> str:
'''generate prompt for chatgpt based on the input'''
logger = TokenLogger()
logger.reset()
openai.api_key = os.getenv("OPENAI_API_KEY")
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
messages=[{
"role": "user",
"content": prompt
}],
temperature=1.0,
max_tokens=3000
)
res = str(
f'Drawing style: {style}, ' +
response['choices'][0]['message']['content'][:1000]
)
token_usage = response['usage']['total_tokens']

if response.choices[0].message.content is not None:
res = str(
f'Drawing style: {style}, ' +
response.choices[0].message.content[:1000]
)
else:
res = "No content found"

if response.usage is not None:
token_usage = response.usage.total_tokens
else:
token_usage = 0

logger.log(token_usage)
return res

Expand Down
28 changes: 15 additions & 13 deletions demo/auto_prompt_bot.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import os

import openai
from openai import OpenAI

from yival.logger.token_logger import TokenLogger
from yival.schemas.experiment_config import MultimodalOutput
Expand All @@ -12,25 +12,27 @@ def reply(input: str, state: ExperimentState) -> MultimodalOutput:
logger = TokenLogger()
logger.reset()
# Ensure you have your OpenAI API key set up
openai.api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Create a chat message sequence
messages = [{
"role":
"user",
"content":
str(StringWrapper("", name="prompt", state=state)) + f'\n{input}'
}]
# Use the chat-based completion
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{
"role":
"user",
"content":
str(StringWrapper("", name="prompt", state=state)) + f'\n{input}'
}]
)

# Extract the assistant's message (translated text) from the response
answer = MultimodalOutput(
text_output=response['choices'][0]['message']['content'],
text_output=response.choices[0].message.content,
)
token_usage = response['usage']['total_tokens']
if response.usage is not None:
token_usage = response.usage.total_tokens
else:
token_usage = 0
logger.log(token_usage)

return answer
15 changes: 6 additions & 9 deletions demo/auto_reply/reply.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,11 @@
import uuid

import faiss
import openai
from langchain.docstore import InMemoryDocstore
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS
from openai import OpenAI

from yival.logger.token_logger import TokenLogger
from yival.schemas.experiment_config import MultimodalOutput
Expand Down Expand Up @@ -95,8 +95,7 @@ def reply(self, weibo_post: str, user_input: str, state: ExperimentState):
logger = TokenLogger()
logger.reset()
# Ensure you have your OpenAI API key set up
openai.api_key = os.getenv("OPENAI_API_KEY")

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
documents = self.retriever.get_relevant_documents(user_input)
query_context = ""
if documents:
Expand Down Expand Up @@ -140,18 +139,16 @@ def reply(self, weibo_post: str, user_input: str, state: ExperimentState):
)
) + suffix

# Create a chat message sequence
messages = [{"role": "user", "content": message}]
# Use the chat-based completion
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
response = client.completions.create(
model="gpt-3.5-turbo", prompt=prompt
)

# Extract the assistant's message (translated text) from the response
answer = MultimodalOutput(
text_output=response['choices'][0]['message']['content'],
text_output=response.choices[0].message.content,
)
token_usage = response['usage']['total_tokens']
token_usage = response.usage.total_tokens
logger.log(token_usage)

return answer
Expand Down
100 changes: 100 additions & 0 deletions demo/building_design.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
'''This script is used to generate image from a building design prompt'''

import os

from openai import OpenAI

from yival.logger.token_logger import TokenLogger
from yival.schemas.experiment_config import MultimodalOutput
from yival.states.experiment_state import ExperimentState
from yival.wrappers.string_wrapper import StringWrapper


def prompt_generation(prompt: str) -> str:
    """Ask gpt-3.5-turbo to expand *prompt* and return up to 1000 chars of the reply.

    Token usage is recorded via TokenLogger; returns "No content found" when
    the model yields no message content.
    """
    logger = TokenLogger()
    logger.reset()
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=1.0,
        max_tokens=3000
    )

    # Guard against a null message body before truncating to 1000 characters.
    content = response.choices[0].message.content
    res = "No content found" if content is None else str(content[:1000])

    # usage may be absent on the v1 response object; treat that as zero tokens.
    usage = response.usage
    token_usage = usage.total_tokens if usage is not None else 0
    logger.log(token_usage)
    return res


# def load_image(response):
# '''load image from response'''
# print("[INFO] start load images")
# url = f"{BASE_URL}/getImage"
# image_urls = response['response']['imageUrls']
# image_list = []
# for image_url in image_urls:
# payload = json.dumps({"imgUrl": image_url})
# response = s.post(url, headers=HEADERS, data=payload)
# if response.status_code == 200:
# image_data = response.content
# image = Image.open(io.BytesIO(image_data))
# image_list.append(image)
# else:
# print(
# f"[Error] Failed to load image from {image_url}. Response code: {response.status_code}"
# )
# print("[INFO] Successfully load images.")

# return image_list


def building_design(location: str, function: str, state: ExperimentState):
    """Generate a DALL-E 3 image for a building design at *location* with *function*.

    The textual prompt is first refined through prompt_generation(); the raw
    images.generate response object is returned to the caller.
    """
    # Ensure you have your OpenAI API key set up
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    # Build the experiment-tracked task description, then refine it via chat.
    task = StringWrapper(
        "Generate a building design for a building",
        name="task",
        variables={
            "location": location,
            "function": function,
        },
        state=state
    )
    prompt = prompt_generation(str(task))
    print(f"prompt: {prompt}")

    response = client.images.generate(
        model="dall-e-3", prompt=prompt, n=1, size="1024x1024"
    )
    print(f"response: {response}")
    return response


def main():
    """Demo entry point: render an office building design for New York."""
    state = ExperimentState()
    result = building_design("New York", "office", state)
    print(result)


if __name__ == "__main__":
main()
12 changes: 7 additions & 5 deletions demo/complete_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import time

import openai
from openai import OpenAI
from tenacity import retry, stop_after_attempt, wait_random

from yival.logger.token_logger import TokenLogger
Expand All @@ -13,7 +14,10 @@

@retry(wait=wait_random(min=1, max=20), stop=stop_after_attempt(5))
def completion_with_backpff(**kwargs):
response = openai.ChatCompletion.create(**kwargs)
request_timeout = kwargs.pop("request_timeout")
openai.timeout = request_timeout
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
response = client.chat.completions.create(**kwargs)
return response


Expand Down Expand Up @@ -55,10 +59,8 @@ def complete_task(
max_tokens=1000,
request_timeout=20
)
res = MultimodalOutput(
text_output=response['choices'][0]['message']['content'],
)
token_usage = response['usage']['total_tokens']
res = MultimodalOutput(text_output=response.choices[0].message.content, )
token_usage = response.usage.total_tokens
logger.log(token_usage)
return res

Expand Down
24 changes: 11 additions & 13 deletions demo/configs/animal_story.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,19 +6,18 @@ dataset:
chunk_size: 1000
diversify: true
prompt:
"Please provide a concrete and realistic test case as a dictionary for function invocation using the ** operator.
Only include parameters, excluding description and name.
Ensure it's succinct and well-structured.
**Only provide the dictionary.**"
"Please provide a concrete and realistic test case as a dictionary for function invocation using the ** operator.
Only include parameters, excluding description and name.
Ensure it's succinct and well-structured.
**Only provide the dictionary.**"
input_function:
description:
Given the species of an animal and its character, generate a corresponding story
description: Given the species of an animal and its character, generate a corresponding story
name: animal_story_generation
parameters:
species: str
character: str
drawing_style: str
number_of_examples: 2
number_of_examples: 1
model_name: gpt-4
source_type: machine_generated

Expand All @@ -27,11 +26,11 @@ variations:
generator_name: openai_prompt_based_variation_generator
generator_config:
model_name: gpt-4
number_of_variations: 2
number_of_variations: 1
diversify: false
max_tokens: 2000
variables: null
prompt:
prompt:
- content: |-
Your object is to construct a concise instruction prompt for GPT-4. This prompt will instruct GPT-4 as a gentle, imaginative children's writer to write all kinds of cute, kid-friendly stories based on animal species and personalities for an audience of YOUNG ADULTS and TEENAGERS.
Points to emphasize in your instruction:
Expand Down Expand Up @@ -87,7 +86,6 @@ human_rating_configs:
instructions: Rate the quality of the generated image.
scale: [1, 5]


selection_strategy:
ahp_selection:
criteria:
Expand All @@ -107,12 +105,12 @@ selection_strategy:
enhancer:
name: "optimize_by_prompt_enhancer"
model_name: "gpt-4"
max_iterations: 2
max_iterations: 1
enhance_var: ["task"]
head_meta_instruction: |
Now you will help me generate a prompt which is used to generate a corresponding
story according to the species of an animal which is [animal_species] and its character [animal_character].
I already have some prompt and its evaluation results :
end_meta_instruction: |
Give me a new prompt that is different from all pairs above, and has a evaluation value higher than any of above.
Give me a new prompt that is different from all pairs above, and has a evaluation value higher than any of above.
Loading

0 comments on commit 6e93038

Please sign in to comment.