#38 Groundwork for migration to new openai >=v1.0.0 Python API #41

Closed · wants to merge 1 commit
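For context on the changes below: openai v1.0.0 replaced the module-level `openai.api_key` / `openai.ChatCompletion.create` pattern with an explicit `OpenAI` client object, and responses became typed objects accessed by attribute rather than by dict key. A minimal sketch of the v1 pattern this PR moves toward (the model name and prompt here are placeholders, not values from the PR):

```python
from openai import OpenAI

# The client reads OPENAI_API_KEY from the environment if api_key is omitted.
client = OpenAI(api_key="sk-placeholder")

response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello."}],
    max_tokens=16,
    temperature=0.7,
)

# v1 responses are typed objects: attribute access, not dict subscripting.
print(response.choices[0].message.content)
```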
factscore/atomic_facts.py (1 change: 0 additions & 1 deletion)
@@ -6,7 +6,6 @@
 import spacy
 import sys
 import nltk
-import openai
 from rank_bm25 import BM25Okapi
 import os
 import time
factscore/openai_lm.py (52 changes: 30 additions & 22 deletions)
@@ -1,16 +1,18 @@
 from factscore.lm import LM
-import openai
+from openai import OpenAI, BadRequestError
 import sys
 import time
 import os
 import numpy as np
 import logging


 class OpenAIModel(LM):

     def __init__(self, model_name, cache_file=None, key_path="api.key"):
         self.model_name = model_name
         self.key_path = key_path
+        self.client = None  # Initialized with load_model() method
         self.temp = 0.7
         self.save_interval = 100
         super().__init__(cache_file)
@@ -21,7 +23,7 @@ def load_model(self):
         assert os.path.exists(key_path), f"Please place your OpenAI API Key in {key_path}."
         with open(key_path, 'r') as f:
             api_key = f.readline()
-        openai.api_key = api_key.strip()
+        self.client = OpenAI(api_key=api_key.strip())
         self.model = self.model_name

     def _generate(self, prompt, max_sequence_length=2048, max_output_length=128):
@@ -33,65 +35,71 @@ def _generate(self, prompt, max_sequence_length=2048, max_output_length=128):
             # Construct the prompt sent to ChatGPT
             message = [{"role": "user", "content": prompt}]
             # Call API
-            response = call_ChatGPT(message, temp=self.temp, max_len=max_sequence_length)
+            response = call_ChatGPT(message, self.client, temp=self.temp, max_len=max_sequence_length)
             # Get the output from the response
             output = response["choices"][0]["message"]["content"]
             return output, response
         elif self.model_name == "InstructGPT":
             # Call API
-            response = call_GPT3(prompt, temp=self.temp)
+            response = call_GPT3(prompt, self.client, temp=self.temp)
             # Get the output from the response
-            output = response["choices"][0]["text"]
+            output = response.choices[0].message.content
             return output, response
         else:
             raise NotImplementedError()

-def call_ChatGPT(message, model_name="gpt-3.5-turbo", max_len=1024, temp=0.7, verbose=False):
+
+def call_ChatGPT(message, openai_client, model_name="gpt-3.5-turbo", max_len=1024, temp=0.7, verbose=False):
     # call GPT-3 API until result is provided and then return it
     response = None
     received = False
     num_rate_errors = 0
     while not received:
         try:
-            response = openai.ChatCompletion.create(model=model_name,
-                                                    messages=message,
-                                                    max_tokens=max_len,
-                                                    temperature=temp)
+            response = openai_client.chat.completions.create(
+                model=model_name,
+                messages=message,
+                max_tokens=max_len,
+                temperature=temp)
             received = True
         except:
             # print(message)
             num_rate_errors += 1
             error = sys.exc_info()[0]
-            if error == openai.error.InvalidRequestError:
+            if error == BadRequestError:
                 # something is wrong: e.g. prompt too long
-                logging.critical(f"InvalidRequestError\nPrompt passed in:\n\n{message}\n\n")
+                logging.critical(f"BadRequestError\nPrompt passed in:\n\n{message}\n\n")
                 assert False

             logging.error("API error: %s (%d). Waiting %dsec" % (error, num_rate_errors, np.power(2, num_rate_errors)))
             time.sleep(np.power(2, num_rate_errors))
     return response


-def call_GPT3(prompt, model_name="text-davinci-003", max_len=512, temp=0.7, num_log_probs=0, echo=False, verbose=False):
+def call_GPT3(prompt, openai_client, model_name="gpt-3.5-turbo-0125", max_len=512, temp=0.7, num_log_probs=0):
     # call GPT-3 API until result is provided and then return it
     response = None
     received = False
     num_rate_errors = 0
     while not received:
         try:
-            response = openai.Completion.create(model=model_name,
-                                                prompt=prompt,
-                                                max_tokens=max_len,
-                                                temperature=temp,
-                                                logprobs=num_log_probs,
-                                                echo=echo)
+            response = openai_client.chat.completions.create(
+                model=model_name,
+                messages=[
+                    {"role": "user", "content": prompt},
+                ],
+                max_tokens=max_len,
+                temperature=temp,
+                logprobs=num_log_probs > 0,  # Needs to be True if num_log_probs > 0
+                top_logprobs=num_log_probs,
+            )
             received = True
         except:
             error = sys.exc_info()[0]
             num_rate_errors += 1
-            if error == openai.error.InvalidRequestError:
+            if error == BadRequestError:
                 # something is wrong: e.g. prompt too long
-                logging.critical(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
+                logging.critical(f"BadRequestError\nPrompt passed in:\n\n{prompt}\n\n")
                 assert False
             logging.error("API error: %s (%d)" % (error, num_rate_errors))
             time.sleep(np.power(2, num_rate_errors))
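One caveat worth flagging in this file: the `ChatGPT` branch of `_generate` still reads `response["choices"][0]["message"]["content"]`, but v1 response objects are pydantic models and do not support dict subscripting, so that line needs the same attribute-access change the `InstructGPT` branch received. Separately, comparing `sys.exc_info()[0] == BadRequestError` inside a bare `except:` works, but the idiomatic v1 form is to catch the exception types directly. A hedged sketch of the retry loop written that way (treating `RateLimitError` as the retryable case is an assumption; the PR retries every non-`BadRequestError` failure):

```python
import time
import logging

from openai import OpenAI, BadRequestError, RateLimitError


def call_chat_with_retry(client: OpenAI, messages, model_name="gpt-3.5-turbo",
                         max_len=1024, temp=0.7):
    """Sketch: call the v1 chat endpoint, retrying with exponential backoff."""
    num_errors = 0
    while True:
        try:
            return client.chat.completions.create(
                model=model_name,
                messages=messages,
                max_tokens=max_len,
                temperature=temp,
            )
        except BadRequestError:
            # Not retryable: e.g. the prompt exceeds the model's context window.
            logging.critical("BadRequestError\nPrompt passed in:\n\n%s\n\n", messages)
            raise
        except RateLimitError as err:
            num_errors += 1
            wait = 2 ** num_errors
            logging.error("API error: %s (%d). Waiting %ds", err, num_errors, wait)
            time.sleep(wait)
```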
preprocessing/preprocess_acl.py (6 changes: 3 additions & 3 deletions)

@@ -1,7 +1,7 @@
 import pandas as pd
 import tqdm
 import json
-import openai
+from openai import OpenAI
 from factscore.openai_lm import call_ChatGPT
 from factscore.factscorer import FactScorer

@@ -49,12 +49,12 @@

with open("api.key", 'r') as f:
api_key = f.readline()
openai.api_key = api_key.strip()
openai_client = OpenAI(api_key=api_key.strip())

responses = []
for ptitle, prompt in tqdm.tqdm(zip(prompt_titles, prompts_list)):
message = [{"role": "user", "content": prompt}]
response = call_ChatGPT(message, model_name="gpt-3.5-turbo-0301")
response = call_ChatGPT(message, openai_client, model_name="gpt-3.5-turbo-0301")
responses.append({
"topic": ptitle,
"output": response["choices"][0]["message"]["content"]
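The same dict-vs-attribute issue applies here: `call_ChatGPT` now returns a v1 `ChatCompletion` object, so `response["choices"][0]["message"]["content"]` would raise a `TypeError` at runtime. A minimal sketch of two ways the appended value could be read under the v1 client (`response` comes from the loop above):

```python
# Preferred: attribute access on the typed v1 response object.
output = response.choices[0].message.content

# Alternative if downstream code expects the old dict shape: v1 response
# objects are pydantic models, so they can be converted explicitly.
response_dict = response.model_dump()
output = response_dict["choices"][0]["message"]["content"]
```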