src/examples/openai_example/embeddings_create.py (1 change: 1 addition & 0 deletions)
@@ -16,5 +16,6 @@ def embeddings_create():
     result = client.embeddings.create(
         model="text-embedding-ada-002",
         input="Once upon a time, there was a pirate.",
+        encoding_format="float",
     )
     return result
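
The added encoding_format="float" makes the example explicit about requesting raw float vectors rather than base64-encoded output. A minimal usage sketch of the updated example, assuming the standard openai v1 Python client and an OPENAI_API_KEY in the environment (the client setup below is an assumption, not part of this diff):

# Sketch, not part of the PR: call the updated example end to end.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

result = client.embeddings.create(
    model="text-embedding-ada-002",
    input="Once upon a time, there was a pirate.",
    encoding_format="float",  # return a raw list of floats, not a base64 string
)
vector = result.data[0].embedding  # list[float]
print(len(vector))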
src/examples/openai_example/images_edit.py (4 changes: 2 additions & 2 deletions)
@@ -23,8 +23,8 @@ def image_edit():
 
     response = client.images.edit(
         model="dall-e-2",
-        image=open("./resources/lounge_flamingo.png", "rb"),
-        mask=open("./resources/mask.png", "rb"),
+        image=open("src/examples/openai_example/resources/lounge_flamingo.png", "rb"),
+        mask=open("src/examples/openai_example/resources/mask.png", "rb"),
         prompt="A sunlit indoor lounge area with a pool and duck standing in side with flamingo.",
         n=1,
         size="1024x1024",
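The new paths assume the example is run from the repository root. A hypothetical alternative, not part of this PR, would anchor the paths to the example module itself so it also works from other working directories:

# Hypothetical variant (assumption, not from this diff): resolve the resource
# files relative to images_edit.py instead of the current working directory.
from pathlib import Path

RESOURCES = Path(__file__).parent / "resources"

image_file = open(RESOURCES / "lounge_flamingo.png", "rb")
mask_file = open(RESOURCES / "mask.png", "rb")
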
src/langtrace_python_sdk/instrumentation/ollama/patch.py (25 changes: 5 additions & 20 deletions)
@@ -1,6 +1,4 @@
 from langtrace_python_sdk.constants.instrumentation.ollama import APIS
-from importlib_metadata import version as v
-from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
 from langtrace_python_sdk.utils import set_span_attribute
 from langtrace_python_sdk.utils.llm import (
     get_extra_attributes,
@@ -10,11 +8,7 @@
     set_event_completion,
 )
 from langtrace_python_sdk.utils.silently_fail import silently_fail
-from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
-    SERVICE_PROVIDERS,
-)
-from opentelemetry import baggage
+from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
 from langtrace.trace_attributes import LLMSpanAttributes, Event
 from opentelemetry.trace import SpanKind
 import json
@@ -28,7 +22,10 @@ def traced_method(wrapped, instance, args, kwargs):
         service_provider = SERVICE_PROVIDERS["OLLAMA"]
         span_attributes = {
             **get_langtrace_attributes(version, service_provider),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(
+                kwargs,
+                prompts=kwargs.get("messages", None),
+            ),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: api["ENDPOINT"],
             SpanAttributes.LLM_RESPONSE_FORMAT: kwargs.get("format"),
@@ -146,18 +143,6 @@ def _set_input_attributes(span, kwargs, attributes):
     for field, value in attributes.model_dump(by_alias=True).items():
         set_span_attribute(span, field, value)
 
-    if "messages" in kwargs:
-        set_span_attribute(
-            span,
-            SpanAttributes.LLM_PROMPTS,
-            json.dumps(kwargs.get("messages", [])),
-        )
-    if "prompt" in kwargs:
-        set_span_attribute(
-            span,
-            SpanAttributes.LLM_PROMPTS,
-            json.dumps([{"role": "user", "content": kwargs.get("prompt", "")}]),
-        )
     if "options" in kwargs:
         set_span_attribute(
             span,
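Taken together, these hunks centralize prompt handling for Ollama: traced_method now forwards kwargs["messages"] as the prompts argument of get_llm_request_attributes, so the duplicated LLM_PROMPTS logic in _set_input_attributes can be removed. A rough usage sketch grounded in this diff (the kwargs below are illustrative; the import path comes from the code above):

# Sketch: for an ollama chat call, the messages list is serialized once by the
# shared helper; generate-style calls rely on the "prompt" fallback added in
# utils/llm.py further down.
from langtrace_python_sdk.utils.llm import get_llm_request_attributes

kwargs = {"model": "llama3", "messages": [{"role": "user", "content": "Hi"}]}
attrs = get_llm_request_attributes(kwargs, prompts=kwargs.get("messages", None))
# attrs now carries the JSON-serialized messages under the LLM_PROMPTS key, so
# _set_input_attributes no longer needs to set it itself.
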
src/langtrace_python_sdk/instrumentation/openai/patch.py (17 changes: 6 additions & 11 deletions)
@@ -41,6 +41,7 @@
     is_streaming,
     set_event_completion,
     StreamWrapper,
+    set_span_attributes,
 )
 from openai._types import NOT_GIVEN
 
@@ -67,8 +68,7 @@ def traced_method(wrapped, instance, args, kwargs):
         kind=SpanKind.CLIENT.value,
         context=set_span_in_context(trace.get_current_span()),
     ) as span:
-        for field, value in attributes.model_dump(by_alias=True).items():
-            set_span_attribute(span, field, value)
+        set_span_attributes(span, attributes)
         try:
             # Attempt to call the original method
             result = wrapped(*args, **kwargs)
@@ -131,8 +131,7 @@ async def traced_method(wrapped, instance, args, kwargs):
         kind=SpanKind.CLIENT.value,
         context=set_span_in_context(trace.get_current_span()),
     ) as span:
-        for field, value in attributes.model_dump(by_alias=True).items():
-            set_span_attribute(span, field, value)
+        set_span_attributes(span, attributes)
         try:
             # Attempt to call the original method
             result = await wrapped(*args, **kwargs)
@@ -197,9 +196,7 @@ def traced_method(wrapped, instance, args, kwargs):
         kind=SpanKind.CLIENT.value,
         context=set_span_in_context(trace.get_current_span()),
     ) as span:
-        for field, value in attributes.model_dump(by_alias=True).items():
-            if value is not None:
-                span.set_attribute(field, value)
+        set_span_attributes(span, attributes)
         try:
             # Attempt to call the original method
             result = wrapped(*args, **kwargs)
@@ -463,8 +460,7 @@ def traced_method(wrapped, instance, args, kwargs):
         context=set_span_in_context(trace.get_current_span()),
     ) as span:
 
-        for field, value in attributes.model_dump(by_alias=True).items():
-            set_span_attribute(span, field, value)
+        set_span_attributes(span, attributes)
         try:
             # Attempt to call the original method
             result = wrapped(*args, **kwargs)
@@ -521,8 +517,7 @@ async def traced_method(wrapped, instance, args, kwargs):
         context=set_span_in_context(trace.get_current_span()),
     ) as span:
 
-        for field, value in attributes.model_dump(by_alias=True).items():
-            set_span_attribute(span, field, value)
+        set_span_attributes(span, attributes)
         try:
             # Attempt to call the original method
             result = await wrapped(*args, **kwargs)
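Each of these hunks replaces the same copy-pasted loop over attributes.model_dump(by_alias=True) with the set_span_attributes helper imported alongside the other utils.llm utilities. The helper's body is not shown in this diff; judging from the loops it replaces (including the explicit None check in one of them), it plausibly looks like the sketch below, which is an assumption rather than the actual implementation:

# Assumed shape of the shared helper (not taken from this diff): dump the
# pydantic attribute model once and let set_span_attribute skip empty values.
from langtrace_python_sdk.utils import set_span_attribute


def set_span_attributes(span, attributes):
    for field, value in attributes.model_dump(by_alias=True).items():
        set_span_attribute(span, field, value)
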
src/langtrace_python_sdk/utils/llm.py (13 changes: 8 additions & 5 deletions)
@@ -95,8 +95,11 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
 
     user = kwargs.get("user", None)
     if prompts is None:
-        prompts = [{"role": user, "content": kwargs.get("prompt", [])}]
-
+        prompts = (
+            [{"role": user or "user", "content": kwargs.get("prompt")}]
+            if "prompt" in kwargs
+            else None
+        )
     top_k = (
         kwargs.get("n", None)
         or kwargs.get("k", None)
@@ -105,21 +108,21 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
     )
 
     top_p = kwargs.get("p", None) or kwargs.get("top_p", None)
-
+    tools = kwargs.get("tools", None)
     return {
         SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
         SpanAttributes.LLM_TOP_K: top_k,
-        SpanAttributes.LLM_PROMPTS: json.dumps(prompts),
+        SpanAttributes.LLM_PROMPTS: json.dumps(prompts) if prompts else None,
         SpanAttributes.LLM_USER: user,
         SpanAttributes.LLM_REQUEST_TOP_P: top_p,
         SpanAttributes.LLM_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
         SpanAttributes.LLM_SYSTEM_FINGERPRINT: kwargs.get("system_fingerprint"),
         SpanAttributes.LLM_PRESENCE_PENALTY: kwargs.get("presence_penalty"),
         SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
         SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
-        SpanAttributes.LLM_TOOLS: json.dumps(kwargs.get("tools")),
+        SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
         SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
         SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
         SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
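The None-guards matter because json.dumps(None) is the string "null", so the old code tagged spans with a literal "null" tools value and a placeholder prompt entry even when the caller supplied neither. A small illustration in plain Python, independent of the SDK:

import json

# Old behaviour: absent values were still serialized.
print(json.dumps(None))                             # 'null'  (old LLM_TOOLS value)
print(json.dumps([{"role": None, "content": []}]))  # old placeholder prompt entry

# New behaviour: the attribute value stays None and can simply be skipped when
# it is written onto the span.
tools = None
print(json.dumps(tools) if tools else None)         # None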