2 changes: 2 additions & 0 deletions src/examples/langchain_example/__init__.py
@@ -1,3 +1,4 @@
+from examples.langchain_example.langchain_google_genai import basic_google_genai
 from .basic import basic_app, rag, load_and_split
 from langtrace_python_sdk import with_langtrace_root_span
 
@@ -12,6 +13,7 @@ def run(self):
         rag()
         load_and_split()
         basic_graph_tools()
+        basic_google_genai()
 
 
 class GroqRunner:
29 changes: 29 additions & 0 deletions src/examples/langchain_example/langchain_google_example.py
@@ -0,0 +1,29 @@
+from langchain_core.messages import HumanMessage
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+from dotenv import find_dotenv, load_dotenv
+from langtrace_python_sdk import langtrace
+
+_ = load_dotenv(find_dotenv())
+
+langtrace.init()
+
+@with_langtrace_root_span("basic_google_genai")
+def basic_google_genai():
+    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
+    # example
+    message = HumanMessage(
+        content=[
+            {
+                "type": "text",
+                "text": "What's in this image?",
+            },
+        ]
+    )
+    message_image = HumanMessage(content="https://picsum.photos/seed/picsum/200/300")
+
+    res = llm.invoke([message, message_image])
+    # print(res)
+
+
+basic_google_genai()
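A note for anyone running this example: ChatGoogleGenerativeAI reads GOOGLE_API_KEY from the environment, which the load_dotenv call above makes easy to supply via .env. Also, passing the image URL as the plain string content of a second message sends it to the model as text; langchain-google-genai's multimodal convention is an image_url content part, so a combined message would look roughly like this sketch (not part of the diff):

# A minimal sketch, assuming GOOGLE_API_KEY is set: one multimodal message
# carrying the question and the image as an image_url content part.
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
message = HumanMessage(
    content=[
        {"type": "text", "text": "What's in this image?"},
        {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/200/300"},
    ]
)
print(llm.invoke([message]).content)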
@@ -81,21 +81,16 @@ def _instrument(self, **kwargs):
         tracer_provider = kwargs.get("tracer_provider")
         tracer = get_tracer(__name__, "", tracer_provider)
         version = importlib.metadata.version("langchain")
 
         wrap_function_wrapper(
             "langchain.agents.agent",
             "RunnableAgent.plan",
-            generic_patch(
-                "RunnableAgent.plan", "plan", tracer, version, True, True
-            ),
+            generic_patch("RunnableAgent.plan", "plan", tracer, version, True, True),
         )
 
         wrap_function_wrapper(
             "langchain.agents.agent",
             "RunnableAgent.aplan",
-            generic_patch(
-                "RunnableAgent.aplan", "plan", tracer, version, True, True
-            ),
+            generic_patch("RunnableAgent.aplan", "plan", tracer, version, True, True),
         )
 
         # modules_to_patch = []
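The collapsed generic_patch calls above are formatting-only; behavior is unchanged. The mechanism is wrapt's wrap_function_wrapper, which replaces a module attribute with a wrapper that receives the original callable plus the bound instance and call arguments. A minimal sketch, with my_patch as a hypothetical stand-in for what generic_patch returns:

# Minimal wrapt sketch; my_patch is a hypothetical stand-in for generic_patch,
# and the span bookkeeping a real patch would do is elided.
from wrapt import wrap_function_wrapper

def my_patch(method_name):
    def traced_method(wrapped, instance, args, kwargs):
        # a real wrapper would open a span named method_name around this call
        return wrapped(*args, **kwargs)
    return traced_method

wrap_function_wrapper(
    "langchain.agents.agent",
    "RunnableAgent.plan",
    my_patch("RunnableAgent.plan"),
)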
@@ -134,6 +134,20 @@ def _instrument(self, **kwargs):
         ]
 
         modules_to_patch = [
+            (
+                "langchain_core.language_models.chat_models",
+                "chatmodel",
+                generic_patch,
+                True,
+                True,
+            ),
+            (
+                "langchain_core.language_models.base",
+                "language_model",
+                generic_patch,
+                True,
+                True,
+            ),
             ("langchain_core.retrievers", "retriever", generic_patch, True, True),
             ("langchain_core.prompts.chat", "prompt", generic_patch, True, True),
             (
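The two new entries extend instrumentation to chat models and base language models. Judging by the generic_patch signature used in the langchain instrumentation above, each tuple appears to carry (module path, task name, patch factory, trace_input, trace_output); a hedged sketch of how such a table is typically consumed, with methods_of as a made-up stand-in for the file's actual method discovery:

# Hypothetical consumption of modules_to_patch; methods_of is invented here.
for module, task, patch_fn, trace_input, trace_output in modules_to_patch:
    for method in methods_of(module):
        wrap_function_wrapper(
            module,
            method,
            patch_fn(method, task, tracer, version, trace_input, trace_output),
        )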
55 changes: 38 additions & 17 deletions src/langtrace_python_sdk/instrumentation/langchain_core/patch.py
@@ -57,7 +57,24 @@ def traced_method(wrapped, instance, args, kwargs):
"langtrace.service.version": version,
"langtrace.version": v(LANGTRACE_SDK_NAME),
"langchain.task.name": task,
**(extra_attributes if extra_attributes is not None else {}),
"gen_ai.request.model": (
instance.model if hasattr(instance, "model") else None
),
SpanAttributes.LLM_REQUEST_MAX_TOKENS: (
instance.max_output_tokens
if hasattr(instance, "max_output_tokens")
else None
),
SpanAttributes.LLM_TOP_K: (
instance.top_k if hasattr(instance, "top_k") else None
),
SpanAttributes.LLM_REQUEST_TOP_P: (
instance.top_p if hasattr(instance, "top_p") else None
),
SpanAttributes.LLM_REQUEST_TEMPERATURE: (
instance.temperature if hasattr(instance, "temperature") else None
),
**(extra_attributes if extra_attributes is not None else {}), # type: ignore
}

if trace_input and len(args) > 0:
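Each attribute added above is guarded with "x if hasattr(instance, ...) else None", so models lacking a given knob contribute None rather than raising. The same guard collapses into getattr's default argument; a tiny self-contained illustration, with FakeChatModel as a hypothetical stand-in for the instrumented instance:

# getattr with a default is equivalent to the hasattr guards above.
class FakeChatModel:
    temperature = 0.7

m = FakeChatModel()
assert (m.temperature if hasattr(m, "temperature") else None) == getattr(m, "temperature", None)
assert getattr(m, "top_k", None) is None  # absent attribute -> None, no AttributeError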
@@ -79,21 +96,17 @@ def traced_method(wrapped, instance, args, kwargs):
         try:
             # Attempt to call the original method
             result = wrapped(*args, **kwargs)
-
             if trace_output:
                 span.set_attribute("langchain.outputs", to_json_string(result))
-                if hasattr(result, 'usage'):
-                    prompt_tokens = result.usage.prompt_tokens
-                    completion_tokens = result.usage.completion_tokens
-                    span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)
-                    span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens)
-                elif hasattr(result, 'generations') and len(result.generations) > 0 and len(result.generations[0]) > 0 and hasattr(result.generations[0][0], 'text') and isinstance(result.generations[0][0].text, str):
-                    span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, instance.get_num_tokens(result.generations[0][0].text))
-                elif len(args) > 0 and len(args[0]) > 0 and not hasattr(args[0][0], 'text') and hasattr(instance, 'get_num_tokens'):
-                    span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, instance.get_num_tokens(args[0][0]))
-                elif len(args) > 0 and len(args[0]) > 0 and hasattr(args[0][0], 'text') and isinstance(args[0][0].text, str) and hasattr(instance, 'get_num_tokens'):
-                    span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, instance.get_num_tokens(args[0][0].text))
-
+                if hasattr(result, "usage_metadata"):
+                    span.set_attribute(
+                        SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+                        result.usage_metadata["input_tokens"],
+                    )
+                    span.set_attribute(
+                        SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+                        result.usage_metadata["output_tokens"],
+                    )
             span.set_status(StatusCode.OK)
             return result
         except Exception as err:
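The old heuristics (provider-style result.usage, or re-tokenizing prompts and generations with get_num_tokens) give way to LangChain's standardized usage_metadata, which carries input_tokens, output_tokens, and total_tokens when the provider reports usage. One hedged caveat: on message types where usage_metadata exists as an attribute but holds None, hasattr alone passes and the key lookups would fail, so guarding on the value itself may be safer. The shape the new branch reads, with invented numbers:

# LangChain UsageMetadata shape; the numbers are invented for illustration.
usage_metadata = {"input_tokens": 12, "output_tokens": 34, "total_tokens": 46}
prompt_tokens = usage_metadata["input_tokens"]
completion_tokens = usage_metadata["output_tokens"]
assert prompt_tokens + completion_tokens == usage_metadata["total_tokens"]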
@@ -208,9 +221,17 @@ def clean_empty(d):
     if not isinstance(d, (dict, list, tuple)):
         return d
     if isinstance(d, tuple):
-        return tuple(val for val in (clean_empty(val) for val in d) if val != () and val is not None)
+        return tuple(
+            val
+            for val in (clean_empty(val) for val in d)
+            if val != () and val is not None
+        )
     if isinstance(d, list):
-        return [val for val in (clean_empty(val) for val in d) if val != [] and val is not None]
+        return [
+            val
+            for val in (clean_empty(val) for val in d)
+            if val != [] and val is not None
+        ]
     result = {}
     for k, val in d.items():
         if isinstance(val, dict):
@@ -226,7 +247,7 @@ def clean_empty(d):
             result[k] = val.strip()
         elif isinstance(val, object):
             # some langchain objects have a text attribute
-            val = getattr(val, 'text', None)
+            val = getattr(val, "text", None)
             if val is not None and val.strip() != "":
                 result[k] = val.strip()
     return result
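Taken together, clean_empty recurses through dicts, lists, and tuples, drops empty containers and None, and strips strings. A quick illustration of the behavior as I read the branches in this diff; the expected output is an inference from the code shown here, not a documented contract:

# Assumes clean_empty from this module is in scope.
data = {"a": "", "b": {"c": [], "d": "  kept  "}, "e": (None, 1)}
print(clean_empty(data))  # expected: {"b": {"d": "kept"}, "e": (1,)}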
7 changes: 4 additions & 3 deletions src/langtrace_python_sdk/langtrace.py
@@ -128,8 +128,8 @@ def init(
"embedchain": EmbedchainInstrumentation(),
"qdrant-client": QdrantInstrumentation(),
"langchain": LangchainInstrumentation(),
"langchain-core": LangchainCoreInstrumentation(),
"langchain-community": LangchainCommunityInstrumentation(),
"langchain_core": LangchainCoreInstrumentation(),
"langchain_community": LangchainCommunityInstrumentation(),
"langgraph": LanggraphInstrumentation(),
"anthropic": AnthropicInstrumentation(),
"cohere": CohereInstrumentation(),
@@ -190,7 +190,8 @@ def init(


 def init_instrumentations(
-    disable_instrumentations: DisableInstrumentations, all_instrumentations: dict
+    disable_instrumentations: Optional[DisableInstrumentations],
+    all_instrumentations: dict
 ):
     if disable_instrumentations is None:
         for idx, (name, v) in enumerate(all_instrumentations.items()):
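The Optional annotation now matches the body, which checks disable_instrumentations against None before instrumenting everything; init() passes None through when the caller disables nothing. A self-contained sketch of the same pattern, with a plain dict standing in for the SDK's DisableInstrumentations type:

from typing import Optional

def init_instrumentations_sketch(
    disable: Optional[dict],  # stand-in for DisableInstrumentations
    all_instrumentations: dict,
) -> None:
    if disable is None:
        for name, instrumentation in all_instrumentations.items():
            instrumentation.instrument()  # mirrors the enumerate loop above

init_instrumentations_sketch(None, {})  # the default path when nothing is disabled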
2 changes: 1 addition & 1 deletion src/run_example.py
@@ -2,7 +2,7 @@

 ENABLED_EXAMPLES = {
     "anthropic": False,
-    "azureopenai": True,
+    "azureopenai": False,
     "chroma": False,
     "cohere": False,
     "fastapi": False,