diff --git a/src/examples/azureopenai_example/__init__.py b/src/examples/azureopenai_example/__init__.py
new file mode 100644
index 00000000..77af9fa4
--- /dev/null
+++ b/src/examples/azureopenai_example/__init__.py
@@ -0,0 +1,9 @@
+from examples.azureopenai_example.completion import chat_completion
+from langtrace_python_sdk import with_langtrace_root_span, langtrace
+
+langtrace.init()
+
+class AzureOpenAIRunner:
+    @with_langtrace_root_span("AzureOpenAI")
+    def run(self):
+        chat_completion()
diff --git a/src/examples/azureopenai_example/completion.py b/src/examples/azureopenai_example/completion.py
new file mode 100644
index 00000000..dd24b8f6
--- /dev/null
+++ b/src/examples/azureopenai_example/completion.py
@@ -0,0 +1,22 @@
+import os
+from langchain_openai import AzureChatOpenAI
+
+from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+model = AzureChatOpenAI(
+    azure_endpoint=os.environ['AZURE_OPENAI_ENDPOINT'],
+    azure_deployment=os.environ['AZURE_OPENAI_DEPLOYMENT_NAME'],
+    openai_api_version=os.environ['AZURE_OPENAI_API_VERSION'],
+)
+
+@with_langtrace_root_span()
+def chat_completion():
+    messages = [
+        (
+            "system",
+            "You are a helpful assistant that translates English to French. Translate the user sentence.",
+        ),
+        ("human", "I love programming."),
+    ]
+    result = model.invoke(messages)
+    print(result)
diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index 8ff00364..21594e58 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -126,7 +126,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
     tools = kwargs.get("tools", None)
     return {
         SpanAttributes.LLM_OPERATION_NAME: operation_name,
-        SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
+        SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model") or "gpt-3.5-turbo",
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
         SpanAttributes.LLM_TOP_K: top_k,
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index f4bd92b2..249e7c76 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.3.5"
+__version__ = "2.3.6"
diff --git a/src/run_example.py b/src/run_example.py
index bf316e35..9c1f7610 100644
--- a/src/run_example.py
+++ b/src/run_example.py
@@ -2,13 +2,14 @@
 
 ENABLED_EXAMPLES = {
     "anthropic": False,
+    "azureopenai": True,
     "chroma": False,
     "cohere": False,
     "fastapi": False,
     "langchain": False,
     "llamaindex": False,
     "hiveagent": False,
-    "openai": True,
+    "openai": False,
     "perplexity": False,
     "pinecone": False,
     "qdrant": False,
@@ -110,3 +111,9 @@
 
     print(Fore.BLUE + "Running Mistral example" + Fore.RESET)
     MistralRunner().run()
+
+if ENABLED_EXAMPLES["azureopenai"]:
+    from examples.azureopenai_example import AzureOpenAIRunner
+
+    print(Fore.BLUE + "Running Azure OpenAI example" + Fore.RESET)
+    AzureOpenAIRunner().run()