Skip to content

Commit

Permalink
feat: Updated dsp/evaluation/utils.py
Browse files Browse the repository at this point in the history
  • Loading branch information
sweep-ai[bot] committed Dec 23, 2023
1 parent fb1691c commit 6220e7d
Showing 1 changed file with 13 additions and 6 deletions.
19 changes: 13 additions & 6 deletions dsp/evaluation/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,12 @@
from dsp.utils import EM, F1, HotPotF1


def evaluateRetrieval(fn, dev, metric=None):
def evaluateRetrieval(fn, openai_predict_fn, dev, metric=None):
data = []

for example in tqdm.tqdm(dev):
question = example.question
prediction = fn(question)
prediction = openai_predict_fn(question)

d = dict(example)

Expand All @@ -32,12 +32,12 @@ def evaluateRetrieval(fn, dev, metric=None):
display(df.style.set_table_styles([{'selector': 'th', 'props': [('text-align', 'left')]}, {'selector': 'td', 'props': [('text-align', 'left')]}]))


def evaluateAnswer(fn, dev, metric=EM):
def evaluateAnswer(fn, openai_predict_fn, dev, metric=EM):
data = []

for example in tqdm.tqdm(dev):
question = example.question
prediction = fn(question)
prediction = openai_predict_fn(question)

d = dict(example)

Expand All @@ -58,12 +58,12 @@ def evaluateAnswer(fn, dev, metric=EM):



def evaluate(fn, dev, metric=EM):
def evaluate(fn, openai_predict_fn, dev, metric=EM):
data = []

for example in tqdm.tqdm(dev):
question = example.question
prediction = fn(question)
prediction = openai_predict_fn(question)

d = dict(example)

Expand All @@ -84,4 +84,11 @@ def evaluate(fn, dev, metric=EM):

return percentage

# Select the syntax-helper implementation matching the installed OpenAI SDK.
# The v0.x and v1.x OpenAI Python libraries have incompatible call syntax,
# so we route to the matching compatibility shim at import time.
import openai

# Compare on the MAJOR version component only: exact equality against
# '0.28' / '1.0' would fail for real installed versions such as '0.28.1'
# or '1.3.5', silently importing neither shim and leaving the expected
# names undefined.
_openai_major = openai.__version__.split(".", 1)[0]
if _openai_major == "0":
    # Legacy 0.x SDK (openai.ChatCompletion.create-style calls).
    from .syntax_v028 import *
else:
    # 1.x+ SDK (client-object style calls).
    # NOTE(review): assumes any non-0 major uses the v1 syntax — confirm
    # if/when a 2.x SDK changes the interface again.
    from .syntax_v1 import *


0 comments on commit 6220e7d

Please sign in to comment.