Merged
9 changes: 9 additions & 0 deletions src/examples/azureopenai_example/__init__.py
@@ -0,0 +1,9 @@
from examples.azureopenai_example.completion import chat_completion
from langtrace_python_sdk import with_langtrace_root_span, langtrace

langtrace.init()

class AzureOpenAIRunner:
@with_langtrace_root_span("AzureOpenAI")
def run(self):
chat_completion()
22 changes: 22 additions & 0 deletions src/examples/azureopenai_example/completion.py
@@ -0,0 +1,22 @@
import os
from langchain_openai import AzureChatOpenAI

from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span

model = AzureChatOpenAI(
azure_endpoint=os.environ['AZURE_OPENAI_ENDPOINT'],
azure_deployment=os.environ['AZURE_OPENAI_DEPLOYMENT_NAME'],
openai_api_version=os.environ['AZURE_OPENAI_API_VERSION'],
)

@with_langtrace_root_span()
def chat_completion():
messages = [
(
"system",
"You are a helpful assistant that translates English to French. Translate the user sentence.",
),
("human", "I love programming."),
]
result = model.invoke(messages)
print(result)
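Note: AzureChatOpenAI reads its configuration from the environment at import time, so the variables below must be set before the example module is imported. A minimal sketch of driving the example; the endpoint, deployment, version, and key values are placeholders, not part of this change:

import os

# Placeholder values for illustration only; substitute your own Azure OpenAI resource.
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://my-resource.openai.azure.com/")
os.environ.setdefault("AZURE_OPENAI_DEPLOYMENT_NAME", "my-deployment")
os.environ.setdefault("AZURE_OPENAI_API_VERSION", "2024-02-01")
os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-api-key>")  # AzureChatOpenAI also needs credentials

from examples.azureopenai_example import AzureOpenAIRunner

AzureOpenAIRunner().run()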
2 changes: 2 additions & 0 deletions src/examples/langchain_example/__init__.py
@@ -2,6 +2,7 @@
from langtrace_python_sdk import with_langtrace_root_span

from .groq_example import groq_basic, groq_streaming
+from .langgraph_example_tools import basic_graph_tools


class LangChainRunner:
@@ -10,6 +11,7 @@ def run(self):
basic_app()
rag()
load_and_split()
+        basic_graph_tools()


class GroqRunner:
151 changes: 151 additions & 0 deletions src/examples/langchain_example/langgraph_example_tools.py
@@ -0,0 +1,151 @@
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from langchain_core.messages import AIMessage, ToolMessage
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import Tool
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

from langtrace_python_sdk import langtrace

langtrace.init()

primes = {998: 7901, 999: 7907, 1000: 7919}


class PrimeInput(BaseModel):
n: int = Field()


def is_prime(n: int) -> bool:
if n <= 1 or (n % 2 == 0 and n > 2):
return False
for i in range(3, int(n**0.5) + 1, 2):
if n % i == 0:
return False
return True


def get_prime(n: int, primes: dict = primes) -> str:
return str(primes.get(int(n)))


async def aget_prime(n: int, primes: dict = primes) -> str:
return str(primes.get(int(n)))


class State(TypedDict):
messages: Annotated[list, add_messages]
# This flag is new
ask_human: bool


class RequestAssistance(BaseModel):
"""Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

To use this function, relay the user's 'request' so the expert can provide the right guidance.
"""

request: str


llm = ChatAnthropic(model="claude-3-haiku-20240307")
# We can bind the llm to a tool definition, a pydantic model, or a json schema
tools = [
    Tool(
        name="GetPrime",
        func=get_prime,
        description="A tool that returns the `n`th prime number",
        args_schema=PrimeInput,
        coroutine=aget_prime,
    ),
]
# Bind both the GetPrime tool and the RequestAssistance escalation model,
# so the "tools" route in the graph is actually reachable.
llm_with_tools = llm.bind_tools(tools + [RequestAssistance])


def chatbot(state: State):
response = llm_with_tools.invoke(state["messages"])
ask_human = False
if (
response.tool_calls
and response.tool_calls[0]["name"] == RequestAssistance.__name__
):
ask_human = True
return {"messages": [response], "ask_human": ask_human}


graph_builder = StateGraph(State)

graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=tools))


def create_response(response: str, ai_message: AIMessage):
return ToolMessage(
content=response,
tool_call_id=ai_message.tool_calls[0]["id"],
)


def human_node(state: State):
new_messages = []
if not isinstance(state["messages"][-1], ToolMessage):
# Typically, the user will have updated the state during the interrupt.
# If they choose not to, we will include a placeholder ToolMessage to
# let the LLM continue.
new_messages.append(
create_response("No response from human.", state["messages"][-1])
)
return {
# Append the new messages
"messages": new_messages,
# Unset the flag
"ask_human": False,
}


def select_next_node(state: State):
if state["ask_human"]:
return "human"
# Otherwise, we can route as before
return tools_condition(state)


def basic_graph_tools():
graph_builder.add_node("human", human_node)
graph_builder.add_conditional_edges(
"chatbot",
select_next_node,
{"human": "human", "tools": "tools", "__end__": "__end__"},
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge("human", "chatbot")
graph_builder.set_entry_point("chatbot")
memory = MemorySaver()
graph = graph_builder.compile(
checkpointer=memory,
interrupt_before=["human"],
)

config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
{
"messages": [
(
"user",
"I'm learning LangGraph. Could you do some research on it for me?",
)
]
},
config,
stream_mode="values",
)
for event in events:
if "messages" in event:
            event["messages"][-1].pretty_print()
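Because the graph is compiled with interrupt_before=["human"], the stream above stops as soon as the chatbot escalates, and nothing in this example resumes it. A sketch of how a caller could supply the expert's answer and resume on the same thread; resume_after_human is a hypothetical helper and the reply text is invented, while get_state, update_state, and streaming with a None input are standard LangGraph checkpointing APIs:

def resume_after_human(graph, config):
    # Fetch the interrupted state and the AI message that triggered escalation.
    snapshot = graph.get_state(config)
    ai_message = snapshot.values["messages"][-1]
    # Inject an answer as if the "human" node itself had produced it.
    graph.update_state(
        config,
        {"messages": [create_response("An expert suggests starting with the LangGraph docs.", ai_message)]},
        as_node="human",
    )
    # Streaming with None as the input resumes from the interrupt point.
    for event in graph.stream(None, config, stream_mode="values"):
        if "messages" in event:
            event["messages"][-1].pretty_print()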
24 changes: 15 additions & 9 deletions src/langtrace_python_sdk/instrumentation/anthropic/patch.py
@@ -18,6 +18,7 @@
from langtrace.trace_attributes import Event, SpanAttributes, LLMSpanAttributes
from langtrace_python_sdk.utils import set_span_attribute
from langtrace_python_sdk.utils.silently_fail import silently_fail
+import json

from langtrace_python_sdk.utils.llm import (
StreamWrapper,
@@ -99,15 +100,20 @@ def set_response_attributes(
set_span_attribute(
span, SpanAttributes.LLM_RESPONSE_MODEL, result.model
)
-        content_item = result.content[0]
-        completion = [
-            {
-                "role": result.role or "assistant",
-                "content": content_item.text,
-                "type": content_item.type,
-            }
-        ]
-        set_event_completion(span, completion)
+        if hasattr(result, "content") and result.content:
+            content = result.content[0]
+            typ = content.type
+            role = result.role if result.role else "assistant"
+            if typ == "tool_result" or typ == "tool_use":
+                # content.json() already returns a JSON string; passing it
+                # through avoids double-encoding it with json.dumps.
+                set_span_attribute(
+                    span, SpanAttributes.LLM_TOOL_RESULTS, content.json()  # type: ignore
+                )
+            if typ == "text":
+                content = result.content[0].text
+                set_event_completion(
+                    span, [{"type": typ, "role": role, "content": content}]
+                )

if (
hasattr(result, "system_fingerprint")
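For orientation, a standalone sketch of the branching this hunk introduces, using a plain stand-in for an Anthropic content block rather than the SDK's real types; FakeBlock and route are hypothetical names for illustration only:

import json
from dataclasses import dataclass


@dataclass
class FakeBlock:
    """Stand-in for an Anthropic content block, for illustration only."""
    type: str
    text: str = ""

    def json(self) -> str:
        return json.dumps({"type": self.type, "text": self.text})


def route(block: FakeBlock, role: str = "assistant"):
    # Mirrors the patched logic: tool blocks are serialized onto the span,
    # text blocks become a completion event.
    if block.type in ("tool_result", "tool_use"):
        return ("LLM_TOOL_RESULTS", block.json())
    if block.type == "text":
        return ("completion_event", [{"type": block.type, "role": role, "content": block.text}])


print(route(FakeBlock(type="text", text="Bonjour !")))
print(route(FakeBlock(type="tool_use")))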
@@ -68,7 +68,6 @@ def __init__(

class ContentItem:
role: str
-    content: str
text: str
type: str

2 changes: 1 addition & 1 deletion src/langtrace_python_sdk/utils/llm.py
@@ -126,7 +126,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
tools = kwargs.get("tools", None)
return {
SpanAttributes.LLM_OPERATION_NAME: operation_name,
SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model") or "gpt-3.5-turbo",
SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
SpanAttributes.LLM_TOP_K: top_k,
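The effect of this one-line change: requests that never specify a model are now attributed to gpt-3.5-turbo instead of a null model name. A quick illustration of the precedence, with invented values:

kwargs = {"stream": False}  # no "model" key supplied by the caller
model = None
print(model or kwargs.get("model") or "gpt-3.5-turbo")  # prints: gpt-3.5-turbo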
2 changes: 1 addition & 1 deletion src/langtrace_python_sdk/version.py
@@ -1 +1 @@
__version__ = "2.3.5"
__version__ = "2.3.7"
11 changes: 9 additions & 2 deletions src/run_example.py
@@ -2,13 +2,14 @@

ENABLED_EXAMPLES = {
"anthropic": False,
"azureopenai": True,
"chroma": False,
"cohere": False,
"fastapi": False,
"langchain": False,
"langchain": True,
"llamaindex": False,
"hiveagent": False,
"openai": True,
"openai": False,
"perplexity": False,
"pinecone": False,
"qdrant": False,
@@ -110,3 +111,9 @@

print(Fore.BLUE + "Running Mistral example" + Fore.RESET)
MistralRunner().run()

if ENABLED_EXAMPLES["azureopenai"]:
from examples.azureopenai_example import AzureOpenAIRunner

print(Fore.BLUE + "Running Azure OpenAI example" + Fore.RESET)
AzureOpenAIRunner().run()
1 change: 1 addition & 0 deletions src/tests/anthropic/test_anthropic.py
@@ -27,6 +27,7 @@ def test_anthropic(anthropic_client, exporter):
"stream": False,
"max_tokens": 1024,
}

anthropic_client.messages.create(**kwargs)
spans = exporter.get_finished_spans()
completion_span = spans[-1]