Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
4d717b1
Merge branch 'development' into release
karthikscale3 Apr 24, 2024
0233826
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Apr 28, 2024
7f4e951
Merge branch 'development' into release
karthikscale3 Apr 28, 2024
81a6ca0
Merge
karthikscale3 Jun 13, 2024
0c19f77
Merge branch 'development' into release
karthikscale3 Jun 13, 2024
c3a6ccf
remove logs
karthikscale3 Jun 13, 2024
a99cf10
remove requirements
karthikscale3 Jun 13, 2024
1379b27
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Jun 17, 2024
dae04e7
Merge branch 'development' into release
karthikscale3 Jun 17, 2024
129e927
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Jun 24, 2024
16e67f9
Merge branch 'development' into release
karthikscale3 Jun 24, 2024
e604e93
Bump version
karthikscale3 Jun 24, 2024
7e00473
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Jun 24, 2024
6ac71aa
Merge branch 'development' into release
karthikscale3 Jun 24, 2024
c39bf01
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Jun 24, 2024
f89e38c
Merge branch 'development' into release
karthikscale3 Jun 24, 2024
a9d3400
DSPy - Bugfixes and update to dspy-ai (#246)
karthikscale3 Jul 19, 2024
e95e743
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Jul 19, 2024
9ebbe17
Merge branch 'development' into release
karthikscale3 Jul 19, 2024
e6d8542
chore: add back openai tool choice arg (#245)
darshit-s3 Jul 22, 2024
5af542f
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk in…
karthikscale3 Jul 23, 2024
8b41bf9
Allow DSPy span naming (#249)
karthikscale3 Jul 23, 2024
16b9d46
Merge branch 'development' into release
karthikscale3 Jul 23, 2024
a820d79
Merge branch 'development' into release
karthikscale3 Jul 23, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 6 additions & 14 deletions src/examples/dspy_example/math_problems_cot_parallel.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import contextvars
import dspy
from dspy.datasets.gsm8k import GSM8K, gsm8k_metric
from dspy.teleprompt import BootstrapFewShot
from concurrent.futures import ThreadPoolExecutor
from opentelemetry.context import get_current, attach, detach

# flake8: noqa
from langtrace_python_sdk import langtrace, with_langtrace_root_span
Expand All @@ -22,7 +22,8 @@ def __init__(self):
self.prog = dspy.ChainOfThought("question -> answer")

def forward(self, question):
return self.prog(question=question)
result = inject_additional_attributes(lambda: self.prog(question=question), {'langtrace.span.name': 'MathProblemsCotParallel'})
return result

@with_langtrace_root_span(name="parallel_example")
def example():
Expand All @@ -34,21 +35,12 @@ def example():
optimized_cot = teleprompter.compile(CoT(), trainset=gsm8k_trainset)

questions = [
"What is the cosine of 0?",
"What is the tangent of 0?",
"What is the sine of 0?",
"What is the tangent of 100?",
]

current_context = get_current()

def run_with_context(context, func, *args, **kwargs):
token = attach(context)
try:
return func(*args, **kwargs)
finally:
detach(token)

with ThreadPoolExecutor(max_workers=2) as executor:
futures = [executor.submit(run_with_context, current_context, optimized_cot, question=q) for q in questions]
futures = [executor.submit(contextvars.copy_context().run, optimized_cot, question=q) for q in questions]

for future in futures:
ans = future.result()
Expand Down
73 changes: 73 additions & 0 deletions src/examples/openai_example/chat_completion_tool_choice.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# Example taken from https://platform.openai.com/docs/guides/function-calling
import json

from dotenv import find_dotenv, load_dotenv
from openai import OpenAI

from langtrace_python_sdk import langtrace

# Load environment variables (e.g. OPENAI_API_KEY) from .env BEFORE
# constructing the client: OpenAI() reads the key from the environment at
# construction time, so creating it first would miss a key stored only in .env.
_ = load_dotenv(find_dotenv())

client = OpenAI()

langtrace.init(
    write_spans_to_console=True,
)
# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location"""
    # Known cities, checked in order: (substring to match, canonical name, temp).
    known_cities = [
        ("tokyo", "Tokyo", "10"),
        ("san francisco", "San Francisco", "72"),
        ("paris", "Paris", "22"),
    ]
    needle = location.lower()
    for key, city, temperature in known_cities:
        if key in needle:
            return json.dumps(
                {"location": city, "temperature": temperature, "unit": unit}
            )
    # Unknown city: echo the location back with no "unit" key, matching the
    # original fallback branch exactly.
    return json.dumps({"location": location, "temperature": "unknown"})


def run_conversation():
    """Send one chat request that forces the model to call a tool.

    Demonstrates ``tool_choice="required"`` with a single weather-function
    schema, then prints the tool calls the model produced. Unlike the full
    function-calling example, this stops after step 1: the tool is never
    executed and no follow-up completion is requested.
    """
    # Step 1: send the conversation and available functions to the model
    messages = [
        {
            "role": "user",
            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
        }
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            },
        }
    ]
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        tools=tools,
        tool_choice="required",  # force a tool call ("auto", the default, lets the model decide)
    )
    response_message = response.choices[0].message
    # tool_calls is None when the model made no tool call (can't happen with
    # "required", but kept as-is for illustration).
    tool_calls = response_message.tool_calls
    print(tool_calls)


# NOTE(review): run_conversation() has no return statement, so this prints
# "None" after the tool calls printed inside the function.
print(run_conversation())
4 changes: 3 additions & 1 deletion src/langtrace_python_sdk/instrumentation/anthropic/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,9 @@ def traced_method(wrapped, instance, args, kwargs):
prompts = kwargs.get("messages", [])
system = kwargs.get("system")
if system:
prompts = [{"role": "system", "content": system}] + kwargs.get("messages", [])
prompts = [{"role": "system", "content": system}] + kwargs.get(
"messages", []
)

span_attributes = {
**get_langtrace_attributes(version, service_provider),
Expand Down
24 changes: 21 additions & 3 deletions src/langtrace_python_sdk/instrumentation/dspy/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,14 @@ def traced_method(wrapped, instance, args, kwargs):
if config and len(config) > 0:
span_attributes["dspy.optimizer.config"] = json.dumps(config)

# passed operation name
opname = operation_name
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
# append the operation name to the span name
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"

attributes = FrameworkSpanAttributes(**span_attributes)
with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
_set_input_attributes(span, kwargs, attributes)

try:
Expand Down Expand Up @@ -100,6 +106,12 @@ def traced_method(wrapped, instance, args, kwargs):
**(extra_attributes if extra_attributes is not None else {}),
}

# passed operation name
opname = operation_name
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
# append the operation name to the span name
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"

if instance.__class__.__name__:
span_attributes["dspy.signature.name"] = instance.__class__.__name__
span_attributes["dspy.signature"] = str(instance)
Expand All @@ -108,7 +120,7 @@ def traced_method(wrapped, instance, args, kwargs):
span_attributes["dspy.signature.args"] = str(kwargs)

attributes = FrameworkSpanAttributes(**span_attributes)
with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
_set_input_attributes(span, kwargs, attributes)

try:
Expand Down Expand Up @@ -147,6 +159,12 @@ def traced_method(wrapped, instance, args, kwargs):
**(extra_attributes if extra_attributes is not None else {}),
}

# passed operation name
opname = operation_name
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
# append the operation name to the span name
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"

if hasattr(instance, "devset"):
span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset"))
if hasattr(instance, "trainset"):
Expand Down Expand Up @@ -175,7 +193,7 @@ def traced_method(wrapped, instance, args, kwargs):
span_attributes["dspy.evaluate.args"] = str(args)

attributes = FrameworkSpanAttributes(**span_attributes)
with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span:
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
_set_input_attributes(span, kwargs, attributes)

try:
Expand Down
5 changes: 4 additions & 1 deletion src/langtrace_python_sdk/instrumentation/gemini/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,10 @@ def get_llm_model(instance):

def serialize_prompts(args, kwargs, instance):
prompts = []
if hasattr(instance, "_system_instruction") and instance._system_instruction is not None:
if (
hasattr(instance, "_system_instruction")
and instance._system_instruction is not None
):
system_prompt = {
"role": "system",
"content": instance._system_instruction.__dict__["_pb"].parts[0].text,
Expand Down
1 change: 1 addition & 0 deletions src/langtrace_python_sdk/utils/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"),
SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
Expand Down
2 changes: 1 addition & 1 deletion src/langtrace_python_sdk/version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "2.2.2"
__version__ = "2.2.3"