diff --git a/src/examples/dspy_example/math_problems_cot_parallel.py b/src/examples/dspy_example/math_problems_cot_parallel.py
index 8c5fabf7..b70890b5 100644
--- a/src/examples/dspy_example/math_problems_cot_parallel.py
+++ b/src/examples/dspy_example/math_problems_cot_parallel.py
@@ -1,8 +1,8 @@
+import contextvars
 import dspy
 from dspy.datasets.gsm8k import GSM8K, gsm8k_metric
 from dspy.teleprompt import BootstrapFewShot
 from concurrent.futures import ThreadPoolExecutor
-from opentelemetry.context import get_current, attach, detach
 
 # flake8: noqa
-from langtrace_python_sdk import langtrace, with_langtrace_root_span
+from langtrace_python_sdk import langtrace, with_langtrace_root_span, inject_additional_attributes
@@ -22,7 +22,8 @@ def __init__(self):
         self.prog = dspy.ChainOfThought("question -> answer")
 
     def forward(self, question):
-        return self.prog(question=question)
+        result = inject_additional_attributes(lambda: self.prog(question=question), {'langtrace.span.name': 'MathProblemsCotParallel'})
+        return result
 
 @with_langtrace_root_span(name="parallel_example")
 def example():
@@ -34,21 +35,12 @@ def example():
     optimized_cot = teleprompter.compile(CoT(), trainset=gsm8k_trainset)
 
     questions = [
-        "What is the cosine of 0?",
-        "What is the tangent of 0?",
+        "What is the sine of 0?",
+        "What is the tangent of 100?",
     ]
 
-    current_context = get_current()
-
-    def run_with_context(context, func, *args, **kwargs):
-        token = attach(context)
-        try:
-            return func(*args, **kwargs)
-        finally:
-            detach(token)
-
     with ThreadPoolExecutor(max_workers=2) as executor:
-        futures = [executor.submit(run_with_context, current_context, optimized_cot, question=q) for q in questions]
+        futures = [executor.submit(contextvars.copy_context().run, optimized_cot, question=q) for q in questions]
 
         for future in futures:
            ans = future.result()
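Note on the change above: OpenTelemetry's Python context is backed by contextvars, so `contextvars.copy_context().run` snapshots the submitting thread's context (including the active root span) and executes the callable inside that snapshot on the worker thread, which is what the removed attach/detach helper did by hand. A minimal standalone sketch of the pattern; `do_work` is a hypothetical stand-in for `optimized_cot`:

import contextvars
from concurrent.futures import ThreadPoolExecutor

def do_work(question):
    # stand-in for optimized_cot(question=...); runs inside the copied context
    return f"answered: {question}"

questions = ["What is the sine of 0?", "What is the tangent of 100?"]
with ThreadPoolExecutor(max_workers=2) as executor:
    # copy_context() is called on the submitting thread, so each worker
    # inherits the caller's contextvars (and therefore the caller's span)
    futures = [
        executor.submit(contextvars.copy_context().run, do_work, q)
        for q in questions
    ]
    for future in futures:
        print(future.result())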
diff --git a/src/examples/openai_example/chat_completion_tool_choice.py b/src/examples/openai_example/chat_completion_tool_choice.py
new file mode 100644
index 00000000..e1aaef11
--- /dev/null
+++ b/src/examples/openai_example/chat_completion_tool_choice.py
@@ -0,0 +1,73 @@
+# Example taken from https://platform.openai.com/docs/guides/function-calling
+import json
+
+from dotenv import find_dotenv, load_dotenv
+from openai import OpenAI
+
+from langtrace_python_sdk import langtrace
+
+_ = load_dotenv(find_dotenv())
+
+client = OpenAI()
+
+langtrace.init(
+    write_spans_to_console=True,
+)
+
+
+# Example dummy function hard coded to return the same weather
+# In production, this could be your backend API or an external API
+def get_current_weather(location, unit="fahrenheit"):
+    """Get the current weather in a given location"""
+    if "tokyo" in location.lower():
+        return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+    elif "san francisco" in location.lower():
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": unit}
+        )
+    elif "paris" in location.lower():
+        return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+    else:
+        return json.dumps({"location": location, "temperature": "unknown"})
+
+
+def run_conversation():
+    # Step 1: send the conversation and available functions to the model
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+        }
+    ]
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_current_weather",
+                "description": "Get the current weather in a given location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city and state, e.g. San Francisco, CA",
+                        },
+                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=messages,
+        tools=tools,
+        tool_choice="required",  # "auto" is the default; "required" forces a tool call
+    )
+    response_message = response.choices[0].message
+    tool_calls = response_message.tool_calls
+    print(tool_calls)
+
+
+run_conversation()
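The new example stops after printing the tool calls. For reference, a hedged sketch of the remaining round trip from the same OpenAI function-calling guide, reusing `client`, `messages`, `tools`, `response_message`, `tool_calls`, and `get_current_weather` from the file above; this is illustration only, not part of the committed file:

# Execute each requested tool and send the results back so the model can
# produce a natural-language answer from the tool output.
if tool_calls:
    messages.append(response_message)  # extend conversation with the assistant's reply
    for tool_call in tool_calls:
        function_args = json.loads(tool_call.function.arguments)
        function_response = get_current_weather(
            location=function_args.get("location"),
            unit=function_args.get("unit", "fahrenheit"),
        )
        messages.append(
            {
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": tool_call.function.name,
                "content": function_response,
            }
        )
    second_response = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
    )
    print(second_response.choices[0].message.content)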
diff --git a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
index b06fae33..dbaade3e 100644
--- a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
@@ -48,7 +48,9 @@ def traced_method(wrapped, instance, args, kwargs):
         prompts = kwargs.get("messages", [])
         system = kwargs.get("system")
         if system:
-            prompts = [{"role": "system", "content": system}] + kwargs.get("messages", [])
+            prompts = [{"role": "system", "content": system}] + kwargs.get(
+                "messages", []
+            )
 
         span_attributes = {
             **get_langtrace_attributes(version, service_provider),
diff --git a/src/langtrace_python_sdk/instrumentation/dspy/patch.py b/src/langtrace_python_sdk/instrumentation/dspy/patch.py
index 181b276d..4b57fe16 100644
--- a/src/langtrace_python_sdk/instrumentation/dspy/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/dspy/patch.py
@@ -61,8 +61,14 @@ def traced_method(wrapped, instance, args, kwargs):
         if config and len(config) > 0:
             span_attributes["dspy.optimizer.config"] = json.dumps(config)
 
+        # passed operation name
+        opname = operation_name
+        if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
+            # append the passed span name to the operation name
+            opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
+
         attributes = FrameworkSpanAttributes(**span_attributes)
-        with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
             _set_input_attributes(span, kwargs, attributes)
 
             try:
@@ -100,6 +106,12 @@ def traced_method(wrapped, instance, args, kwargs):
             **(extra_attributes if extra_attributes is not None else {}),
         }
 
+        # passed operation name
+        opname = operation_name
+        if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
+            # append the passed span name to the operation name
+            opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
+
         if instance.__class__.__name__:
             span_attributes["dspy.signature.name"] = instance.__class__.__name__
             span_attributes["dspy.signature"] = str(instance)
@@ -108,7 +120,7 @@ def traced_method(wrapped, instance, args, kwargs):
             span_attributes["dspy.signature.args"] = str(kwargs)
 
         attributes = FrameworkSpanAttributes(**span_attributes)
-        with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
             _set_input_attributes(span, kwargs, attributes)
 
             try:
@@ -147,6 +159,12 @@ def traced_method(wrapped, instance, args, kwargs):
             **(extra_attributes if extra_attributes is not None else {}),
         }
 
+        # passed operation name
+        opname = operation_name
+        if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
+            # append the passed span name to the operation name
+            opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"
+
         if hasattr(instance, "devset"):
             span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset"))
         if hasattr(instance, "trainset"):
@@ -175,7 +193,7 @@ def traced_method(wrapped, instance, args, kwargs):
             span_attributes["dspy.evaluate.args"] = str(args)
 
         attributes = FrameworkSpanAttributes(**span_attributes)
-        with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
             _set_input_attributes(span, kwargs, attributes)
 
             try:
diff --git a/src/langtrace_python_sdk/instrumentation/gemini/patch.py b/src/langtrace_python_sdk/instrumentation/gemini/patch.py
index b4b9b799..ceedb379 100644
--- a/src/langtrace_python_sdk/instrumentation/gemini/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/gemini/patch.py
@@ -110,7 +110,10 @@ def get_llm_model(instance):
 
 def serialize_prompts(args, kwargs, instance):
     prompts = []
-    if hasattr(instance, "_system_instruction") and instance._system_instruction is not None:
+    if (
+        hasattr(instance, "_system_instruction")
+        and instance._system_instruction is not None
+    ):
         system_prompt = {
             "role": "system",
             "content": instance._system_instruction.__dict__["_pb"].parts[0].text,
diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index ed29451a..965f2324 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -123,6 +123,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
         SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
         SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
         SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
+        SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"),
         SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
         SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
         SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index ba51cedf..f394e699 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.2.2"
+__version__ = "2.2.3"
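Usage note for the dspy/patch.py changes above: all three traced methods now honor a `langtrace.span.name` extra attribute by emitting the span as `{operation_name}-{name}`. A minimal sketch of how user code opts in, assuming the SDK's `inject_additional_attributes` helper as used in the parallel example at the top of this diff:

import dspy
from langtrace_python_sdk import langtrace, inject_additional_attributes

langtrace.init(write_spans_to_console=True)

# assumes an LM has been configured, e.g. dspy.settings.configure(lm=...)
prog = dspy.ChainOfThought("question -> answer")

# Without the wrapper the span keeps its default operation name; with it,
# the patched traced_method emits f"{operation_name}-MyCoT" instead.
result = inject_additional_attributes(
    lambda: prog(question="What is the sine of 0?"),
    {"langtrace.span.name": "MyCoT"},
)
print(result.answer)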