6 changes: 4 additions & 2 deletions guardrails/llm_providers.py
@@ -4,6 +4,7 @@
 from typing import Any, Awaitable, Callable, Dict, List, Optional, cast
 
 import openai
+from litellm import completion
 from pydantic import BaseModel
 from tenacity import retry, retry_if_exception_type, wait_exponential_jitter
 
@@ -128,8 +129,9 @@ def openai_chat_wrapper(
     api_key = os.environ.get("OPENAI_API_KEY")
 
     # TODO: update this as new models are released
+    # Supported models here: https://litellm.readthedocs.io/en/latest/supported/
     if base_model:
-        openai_response = openai.ChatCompletion.create(
+        openai_response = completion(
             api_key=api_key,
             model=model,
             messages=chat_prompt(text, instructions, **kwargs),
@@ -140,7 +142,7 @@
         )
         return openai_response["choices"][0]["message"]["function_call"]["arguments"]
     else:
-        openai_response = openai.ChatCompletion.create(
+        openai_response = completion(
             api_key=api_key,
             model=model,
             messages=chat_prompt(text, instructions, **kwargs),
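
For reviewers who want to try the new code path locally, below is a minimal sketch of the call pattern this diff adopts. It assumes litellm is installed and OPENAI_API_KEY is set in the environment; the model name and prompt are illustrative, not taken from this PR.

# Minimal sketch of the call pattern adopted in this diff; assumes
# `pip install litellm` and an OPENAI_API_KEY in the environment.
import os

from litellm import completion

response = completion(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-3.5-turbo",  # illustrative; any litellm-supported model works
    messages=[{"role": "user", "content": "Say hello"}],
)

# litellm returns an OpenAI-style response object, so the subscript access
# already used in openai_chat_wrapper keeps working unchanged.
print(response["choices"][0]["message"]["content"])

Because completion mirrors the openai.ChatCompletion.create interface and response shape, the wrapper's response handling, including the function_call arguments extraction on the base_model path, needs no changes in this PR.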