diff --git a/src/examples/openai_example/chat_completion_tool_choice.py b/src/examples/openai_example/chat_completion_tool_choice.py
new file mode 100644
index 00000000..e1aaef11
--- /dev/null
+++ b/src/examples/openai_example/chat_completion_tool_choice.py
@@ -0,0 +1,73 @@
+# Example taken from https://platform.openai.com/docs/guides/function-calling
+import json
+
+from dotenv import find_dotenv, load_dotenv
+from openai import OpenAI
+
+from langtrace_python_sdk import langtrace
+
+_ = load_dotenv(find_dotenv())
+
+client = OpenAI()
+
+langtrace.init(
+    write_spans_to_console=True,
+)
+
+
+# Example dummy function hard coded to return the same weather
+# In production, this could be your backend API or an external API
+def get_current_weather(location, unit="fahrenheit"):
+    """Get the current weather in a given location"""
+    if "tokyo" in location.lower():
+        return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+    elif "san francisco" in location.lower():
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": unit}
+        )
+    elif "paris" in location.lower():
+        return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+    else:
+        return json.dumps({"location": location, "temperature": "unknown"})
+
+
+def run_conversation():
+    # Step 1: send the conversation and available functions to the model
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+        }
+    ]
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_current_weather",
+                "description": "Get the current weather in a given location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city and state, e.g. San Francisco, CA",
+                        },
+                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=messages,
+        tools=tools,
+        tool_choice="required",  # force a tool call ("auto" is the default)
+    )
+    response_message = response.choices[0].message
+    tool_calls = response_message.tool_calls
+    return tool_calls
+
+
+print(run_conversation())
diff --git a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
index b06fae33..dbaade3e 100644
--- a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
@@ -48,7 +48,9 @@ def traced_method(wrapped, instance, args, kwargs):
         prompts = kwargs.get("messages", [])
         system = kwargs.get("system")
         if system:
-            prompts = [{"role": "system", "content": system}] + kwargs.get("messages", [])
+            prompts = [{"role": "system", "content": system}] + kwargs.get(
+                "messages", []
+            )

         span_attributes = {
             **get_langtrace_attributes(version, service_provider),
diff --git a/src/langtrace_python_sdk/instrumentation/gemini/patch.py b/src/langtrace_python_sdk/instrumentation/gemini/patch.py
index b4b9b799..ceedb379 100644
--- a/src/langtrace_python_sdk/instrumentation/gemini/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/gemini/patch.py
@@ -110,7 +110,10 @@ def get_llm_model(instance):

 def serialize_prompts(args, kwargs, instance):
     prompts = []
-    if hasattr(instance, "_system_instruction") and instance._system_instruction is not None:
+    if (
+        hasattr(instance, "_system_instruction")
+        and instance._system_instruction is not None
+    ):
         system_prompt = {
             "role": "system",
             "content": instance._system_instruction.__dict__["_pb"].parts[0].text,
diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index ed29451a..965f2324 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -123,6 +123,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
         SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
         SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
         SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
+        SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"),
         SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
         SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
         SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),