Skip to content

Commit

Permalink
solving #2887 (#5127)
Browse files Browse the repository at this point in the history
# Allowing openAI fine-tuned models
Very simple fix that checks whether an OpenAI `model_name` refers to a
fine-tuned model when loading `context_size` and when computing a call's
cost in the `openai_callback`.

Fixes #2887 
---------

Co-authored-by: Dev 2049 <dev.dev2049@gmail.com>
  • Loading branch information
2 people authored and vowelparrot committed May 24, 2023
1 parent 319978f commit 76bac21
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 0 deletions.
8 changes: 8 additions & 0 deletions langchain/callbacks/openai_info.py
Expand Up @@ -24,12 +24,20 @@
"text-davinci-003": 0.02,
"text-davinci-002": 0.02,
"code-davinci-002": 0.02,
"ada-finetuned": 0.0016,
"babbage-finetuned": 0.0024,
"curie-finetuned": 0.0120,
"davinci-finetuned": 0.1200,
}


def get_openai_token_cost_for_model(
model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
# handling finetuned models
if "ft-" in model_name:
model_name = f"{model_name.split(':')[0]}-finetuned"

suffix = "-completion" if is_completion and model_name.startswith("gpt-4") else ""
model = model_name.lower() + suffix
if model not in MODEL_COST_PER_1K_TOKENS:
Expand Down
4 changes: 4 additions & 0 deletions langchain/llms/openai.py
Expand Up @@ -512,6 +512,10 @@ def modelname_to_contextsize(self, modelname: str) -> int:
"code-cushman-001": 2048,
}

# handling finetuned models
if "ft-" in modelname:
modelname = modelname.split(":")[0]

context_size = model_token_mapping.get(modelname, None)

if context_size is None:
Expand Down

0 comments on commit 76bac21

Please sign in to comment.