From d2c826759e17012fd4257cc84bbf55ad6ff1b697 Mon Sep 17 00:00:00 2001
From: darshit-s3 <119623510+darshit-s3@users.noreply.github.com>
Date: Thu, 5 Sep 2024 19:20:45 +0530
Subject: [PATCH 1/7] fix: weaviate fusion type enum handling (#328)

---
 src/langtrace_python_sdk/instrumentation/weaviate/patch.py | 7 +++++--
 src/langtrace_python_sdk/version.py                        | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/langtrace_python_sdk/instrumentation/weaviate/patch.py b/src/langtrace_python_sdk/instrumentation/weaviate/patch.py
index c2ea4b39..523b047a 100644
--- a/src/langtrace_python_sdk/instrumentation/weaviate/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/weaviate/patch.py
@@ -47,9 +47,12 @@ def extract_inputs(args, kwargs):
     extracted_params = {}
-    kwargs_without_properties = {k: v for k, v in kwargs.items() if k != "properties"}
+    kwargs_without_properties = {
+        k: v for k, v in kwargs.items() if k not in ["properties", "fusion_type"]
+    }
     extracted_params.update(extract_input_params(args, kwargs_without_properties))
-
+    if kwargs.get("fusion_type", None):
+        extracted_params["fusion_type"] = kwargs["fusion_type"].value
     if kwargs.get("properties", None):
         extracted_params["properties"] = []
         for each_prop in kwargs.get("properties"):
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index 4618fe65..f4bd92b2 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.3.4"
+__version__ = "2.3.5"
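The fusion-type change above records the enum member's `.value` instead of the enum object itself, which keeps the extracted parameter a plain, serializable string. A minimal sketch of the logic, assuming an illustrative stand-in enum rather than the real weaviate client type:

    from enum import Enum

    # Illustrative stand-in for the weaviate client's fusion-type enum;
    # the member values here are assumptions, not the client's real ones.
    class FusionType(Enum):
        RANKED = "rankedFusion"
        RELATIVE_SCORE = "relativeScoreFusion"

    def extract_inputs(args, kwargs):
        # Filter out the kwargs that need special handling, then record the
        # enum's plain string value so the span attribute stays serializable.
        extracted = {k: v for k, v in kwargs.items() if k not in ["properties", "fusion_type"]}
        if kwargs.get("fusion_type", None):
            extracted["fusion_type"] = kwargs["fusion_type"].value
        return extracted

    print(extract_inputs((), {"query": "ocean", "fusion_type": FusionType.RANKED}))
    # -> {'query': 'ocean', 'fusion_type': 'rankedFusion'}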
From 72ee4a6cc5411d4ded1ff372ec93a4e3fc83813f Mon Sep 17 00:00:00 2001
From: Rohit Kadhe
Date: Thu, 5 Sep 2024 09:05:45 -0600
Subject: [PATCH 2/7] fix bug causing issue with tools

---
 src/examples/langchain_example/__init__.py         |   2 +
 .../langgraph_example_tools.py                     | 151 ++++++++++++++++++
 .../instrumentation/anthropic/patch.py             |  30 ++--
 3 files changed, 170 insertions(+), 13 deletions(-)
 create mode 100644 src/examples/langchain_example/langgraph_example_tools.py

diff --git a/src/examples/langchain_example/__init__.py b/src/examples/langchain_example/__init__.py
index abc72b43..f421b6b1 100644
--- a/src/examples/langchain_example/__init__.py
+++ b/src/examples/langchain_example/__init__.py
@@ -2,6 +2,7 @@
 from langtrace_python_sdk import with_langtrace_root_span
 
 from .groq_example import groq_basic, groq_streaming
+from .langgraph_example_tools import basic_graph_tools
 
 
 class LangChainRunner:
@@ -10,6 +11,7 @@ def run(self):
         basic_app()
         rag()
         load_and_split()
+        basic_graph_tools()
 
 
 class GroqRunner:
diff --git a/src/examples/langchain_example/langgraph_example_tools.py b/src/examples/langchain_example/langgraph_example_tools.py
new file mode 100644
index 00000000..3ffaf69e
--- /dev/null
+++ b/src/examples/langchain_example/langgraph_example_tools.py
@@ -0,0 +1,151 @@
+from typing import Annotated
+
+from langchain_anthropic import ChatAnthropic
+from langchain_core.messages import HumanMessage
+from langchain_core.pydantic_v1 import BaseModel
+from typing_extensions import TypedDict
+from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.tools import Tool
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.graph import StateGraph
+from langgraph.graph.message import add_messages
+from langgraph.prebuilt import ToolNode, tools_condition
+from langchain_core.messages import AIMessage, ToolMessage
+
+from langtrace_python_sdk import langtrace
+
+langtrace.init()
+
+primes = {998: 7901, 999: 7907, 1000: 7919}
+
+
+class PrimeInput(BaseModel):
+    n: int = Field()
+
+
+def is_prime(n: int) -> bool:
+    if n <= 1 or (n % 2 == 0 and n > 2):
+        return False
+    for i in range(3, int(n**0.5) + 1, 2):
+        if n % i == 0:
+            return False
+    return True
+
+
+def get_prime(n: int, primes: dict = primes) -> str:
+    return str(primes.get(int(n)))
+
+
+async def aget_prime(n: int, primes: dict = primes) -> str:
+    return str(primes.get(int(n)))
+
+
+class State(TypedDict):
+    messages: Annotated[list, add_messages]
+    # This flag is new
+    ask_human: bool
+
+
+class RequestAssistance(BaseModel):
+    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.
+
+    To use this function, relay the user's 'request' so the expert can provide the right guidance.
+    """
+
+    request: str
+
+
+llm = ChatAnthropic(model="claude-3-haiku-20240307")
+# We can bind the llm to a tool definition, a pydantic model, or a json schema
+llm_with_tools = llm.bind_tools([RequestAssistance])
+tools = [
+    Tool(
+        name="GetPrime",
+        func=get_prime,
+        description="A tool that returns the `n`th prime number",
+        args_schema=PrimeInput,
+        coroutine=aget_prime,
+    ),
+]
+
+
+def chatbot(state: State):
+    response = llm_with_tools.invoke(state["messages"])
+    ask_human = False
+    if (
+        response.tool_calls
+        and response.tool_calls[0]["name"] == RequestAssistance.__name__
+    ):
+        ask_human = True
+    return {"messages": [response], "ask_human": ask_human}
+
+
+graph_builder = StateGraph(State)
+
+graph_builder.add_node("chatbot", chatbot)
+graph_builder.add_node("tools", ToolNode(tools=tools))
+
+
+def create_response(response: str, ai_message: AIMessage):
+    return ToolMessage(
+        content=response,
+        tool_call_id=ai_message.tool_calls[0]["id"],
+    )
+
+
+def human_node(state: State):
+    new_messages = []
+    if not isinstance(state["messages"][-1], ToolMessage):
+        # Typically, the user will have updated the state during the interrupt.
+        # If they choose not to, we will include a placeholder ToolMessage to
+        # let the LLM continue.
+        new_messages.append(
+            create_response("No response from human.", state["messages"][-1])
+        )
+    return {
+        # Append the new messages
+        "messages": new_messages,
+        # Unset the flag
+        "ask_human": False,
+    }
+
+
+def select_next_node(state: State):
+    if state["ask_human"]:
+        return "human"
+    # Otherwise, we can route as before
+    return tools_condition(state)
+
+
+def basic_graph_tools():
+    graph_builder.add_node("human", human_node)
+    graph_builder.add_conditional_edges(
+        "chatbot",
+        select_next_node,
+        {"human": "human", "tools": "tools", "__end__": "__end__"},
+    )
+    graph_builder.add_edge("tools", "chatbot")
+    graph_builder.add_edge("human", "chatbot")
+    graph_builder.set_entry_point("chatbot")
+    memory = MemorySaver()
+    graph = graph_builder.compile(
+        checkpointer=memory,
+        interrupt_before=["human"],
+    )
+
+    config = {"configurable": {"thread_id": "1"}}
+    events = graph.stream(
+        {
+            "messages": [
+                (
+                    "user",
+                    "I'm learning LangGraph. Could you do some research on it for me?",
+                )
+            ]
+        },
+        config,
+        stream_mode="values",
+    )
+    for event in events:
+        if "messages" in event:
+            event["messages"][-1]
diff --git a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
index 3ac5f5a6..0c2d7dd0 100644
--- a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
@@ -61,7 +61,7 @@ def traced_method(wrapped, instance, args, kwargs):
             **get_llm_request_attributes(kwargs, prompts=prompts),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: APIS["MESSAGES_CREATE"]["ENDPOINT"],
-            **get_extra_attributes(),
+            **get_extra_attributes(),  # type: ignore
         }
 
         attributes = LLMSpanAttributes(**span_attributes)
@@ -88,22 +88,26 @@
 @silently_fail
 def set_response_attributes(result, span, kwargs):
     if not is_streaming(kwargs):
+        if hasattr(result, "content") and result.content is not None:
             set_span_attribute(
                 span, SpanAttributes.LLM_RESPONSE_MODEL, result.model
             )
-        completion = [
-            {
-                "role": result.role if result.role else "assistant",
-                "content": result.content[0].text,
-                "type": result.content[0].type,
-            }
-        ]
-        set_event_completion(span, completion)
-
-    else:
-        responses = []
-        set_event_completion(span, responses)
+        if hasattr(result, "content") and result.content[0] is not None:
+            content = result.content[0]
+            typ = content.type
+            role = result.role if result.role else "assistant"
+            if typ == "tool_result" or typ == "tool_use":
+                content = content.json()
+                set_span_attribute(
+                    span, SpanAttributes.LLM_TOOL_RESULTS, json.dumps(content)
+                )
+            if typ == "text":
+                content = result.content[0].text
+                set_event_completion(
+                    span, [{type: typ, role: role, content: content}]
+                )
 
     if (
         hasattr(result, "system_fingerprint")
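One detail of the new tool-handling branch is worth tracing through: if `content.json()` returns a JSON string (as pydantic-style model methods do), the subsequent `json.dumps(content)` encodes that string a second time, so the recorded attribute ends up quoted and escaped. A small stand-alone sketch of the difference, using a plain dict in place of the SDK's content block:

    import json

    # Stand-in dict for a tool_use content block; the real SDK object is a
    # pydantic-style model whose .json() method already returns a JSON string.
    tool_block = {"type": "tool_use", "name": "GetPrime", "input": {"n": 1000}}

    once = json.dumps(tool_block)  # comparable to what content.json() yields
    twice = json.dumps(once)       # what json.dumps(content) then produces
    print(once)   # {"type": "tool_use", "name": "GetPrime", "input": {"n": 1000}}
    print(twice)  # "{\"type\": \"tool_use\", ...}" -- the JSON is encoded twice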
From e06c94389187541ee9fd8285557bcb44381824a8 Mon Sep 17 00:00:00 2001
From: Rohit Kadhe
Date: Thu, 5 Sep 2024 09:20:59 -0600
Subject: [PATCH 3/7] --amend

---
 src/langtrace_python_sdk/instrumentation/anthropic/patch.py | 2 +-
 src/tests/anthropic/test_anthropic.py                       | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
index 8c4976f2..3dbe2487 100644
--- a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py
@@ -112,7 +112,7 @@ def set_response_attributes(
             if typ == "text":
                 content = result.content[0].text
                 set_event_completion(
-                    span, [{type: typ, role: role, content: content}]
+                    span, [{"type": typ, role: role, content: content}]
                 )
 
     if (
diff --git a/src/tests/anthropic/test_anthropic.py b/src/tests/anthropic/test_anthropic.py
index 248d592a..f739684b 100644
--- a/src/tests/anthropic/test_anthropic.py
+++ b/src/tests/anthropic/test_anthropic.py
@@ -27,6 +27,7 @@ def test_anthropic(anthropic_client, exporter):
         "stream": False,
         "max_tokens": 1024,
     }
+
     anthropic_client.messages.create(**kwargs)
     spans = exporter.get_finished_spans()
     completion_span = spans[-1]
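The one-character fix above is easy to misread: in `{type: typ, ...}` the keys are bare names, which Python evaluates as variables (`type` being the builtin class), not as string literals. The amend quotes `"type"`, while `role` and `content` appear to remain bare names. A quick illustration of what each form actually builds:

    typ, role, content = "text", "assistant", "Hello!"

    broken = {type: typ, role: role, content: content}
    print(broken)
    # {<class 'type'>: 'text', 'assistant': 'assistant', 'Hello!': 'Hello!'}

    fixed = {"type": typ, "role": role, "content": content}
    print(fixed)
    # {'type': 'text', 'role': 'assistant', 'content': 'Hello!'}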
From 09385900655b8f0b0c23b9345aa36188b9559c6c Mon Sep 17 00:00:00 2001
From: Obinna Okafor
Date: Thu, 5 Sep 2024 17:45:49 +0100
Subject: [PATCH 4/7] add default llm request model when it isn't specified

---
 src/langtrace_python_sdk/utils/llm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index 8ff00364..21594e58 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -126,7 +126,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
     tools = kwargs.get("tools", None)
     return {
         SpanAttributes.LLM_OPERATION_NAME: operation_name,
-        SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
+        SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model") or "gpt-3.5-turbo",
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
         SpanAttributes.LLM_TOP_K: top_k,
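The new fallback chains `or` left to right: an explicit `model` argument wins, then the request's `model` kwarg, then the hard-coded default. Note that any falsy value (including an empty string) also falls through to the default. A stand-alone sketch of the resolution order, with a helper name invented for illustration:

    def resolve_request_model(model=None, kwargs=None):
        # Same expression as the patch: explicit argument wins, then the
        # request kwargs, then the hard-coded "gpt-3.5-turbo" default.
        kwargs = kwargs or {}
        return model or kwargs.get("model") or "gpt-3.5-turbo"

    print(resolve_request_model())                            # gpt-3.5-turbo
    print(resolve_request_model(kwargs={"model": "gpt-4o"}))  # gpt-4o
    print(resolve_request_model(model="claude-3-haiku"))      # claude-3-haiku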
From e010e86948e7f875190533925ab12fc348334749 Mon Sep 17 00:00:00 2001
From: Obinna Okafor
Date: Fri, 6 Sep 2024 15:04:30 +0100
Subject: [PATCH 5/7] add azure openai example

---
 src/examples/azureopenai_example/__init__.py  |  9 ++++++++
 .../azureopenai_example/completion.py         | 22 +++++++++++++++++++
 src/run_example.py                            |  9 +++++++-
 3 files changed, 39 insertions(+), 1 deletion(-)
 create mode 100644 src/examples/azureopenai_example/__init__.py
 create mode 100644 src/examples/azureopenai_example/completion.py

diff --git a/src/examples/azureopenai_example/__init__.py b/src/examples/azureopenai_example/__init__.py
new file mode 100644
index 00000000..77af9fa4
--- /dev/null
+++ b/src/examples/azureopenai_example/__init__.py
@@ -0,0 +1,9 @@
+from examples.azureopenai_example.completion import chat_completion
+from langtrace_python_sdk import with_langtrace_root_span, langtrace
+
+langtrace.init()
+
+class AzureOpenAIRunner:
+    @with_langtrace_root_span("AzureOpenAI")
+    def run(self):
+        chat_completion()
diff --git a/src/examples/azureopenai_example/completion.py b/src/examples/azureopenai_example/completion.py
new file mode 100644
index 00000000..dd24b8f6
--- /dev/null
+++ b/src/examples/azureopenai_example/completion.py
@@ -0,0 +1,22 @@
+import os
+from langchain_openai import AzureChatOpenAI
+
+from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+model = AzureChatOpenAI(
+    azure_endpoint=os.environ['AZURE_OPENAI_ENDPOINT'],
+    azure_deployment=os.environ['AZURE_OPENAI_DEPLOYMENT_NAME'],
+    openai_api_version=os.environ['AZURE_OPENAI_API_VERSION'],
+)
+
+@with_langtrace_root_span()
+def chat_completion():
+    messages = [
+        (
+            "system",
+            "You are a helpful assistant that translates English to French. Translate the user sentence.",
+        ),
+        ("human", "I love programming."),
+    ]
+    result = model.invoke(messages)
+    print(result)
diff --git a/src/run_example.py b/src/run_example.py
index bf316e35..9c1f7610 100644
--- a/src/run_example.py
+++ b/src/run_example.py
@@ -2,13 +2,14 @@
 
 ENABLED_EXAMPLES = {
     "anthropic": False,
+    "azureopenai": True,
     "chroma": False,
     "cohere": False,
     "fastapi": False,
     "langchain": False,
     "llamaindex": False,
     "hiveagent": False,
-    "openai": True,
+    "openai": False,
     "perplexity": False,
     "pinecone": False,
     "qdrant": False,
@@ -110,3 +111,9 @@
 
     print(Fore.BLUE + "Running Mistral example" + Fore.RESET)
     MistralRunner().run()
+
+if ENABLED_EXAMPLES["azureopenai"]:
+    from examples.azureopenai_example import AzureOpenAIRunner
+
+    print(Fore.BLUE + "Running Azure OpenAI example" + Fore.RESET)
+    AzureOpenAIRunner().run()

From a22642e56441a4f6cbeadb80e8a5918ea25f3bc6 Mon Sep 17 00:00:00 2001
From: Obinna Okafor
Date: Fri, 6 Sep 2024 15:23:39 +0100
Subject: [PATCH 6/7] bump version

---
 src/langtrace_python_sdk/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index f4bd92b2..249e7c76 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.3.5"
+__version__ = "2.3.6"

From 1cbb89f9539980076885ffa303814948e59df71e Mon Sep 17 00:00:00 2001
From: Rohit Kadhe
Date: Fri, 6 Sep 2024 09:22:27 -0600
Subject: [PATCH 7/7] release version 2.3.7

---
 src/langtrace_python_sdk/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index 249e7c76..a0b06b86 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.3.6"
+__version__ = "2.3.7"
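Note that the Azure example's `completion.py` constructs the model at import time, so its three environment variables must be set before `src/run_example.py` imports the runner. A sketch of a preflight check, assuming the client also reads `AZURE_OPENAI_API_KEY` implicitly (the values in the comments are placeholders, not working credentials):

    import os

    required = [
        "AZURE_OPENAI_ENDPOINT",         # e.g. https://<your-resource>.openai.azure.com/
        "AZURE_OPENAI_DEPLOYMENT_NAME",  # e.g. <your-deployment>
        "AZURE_OPENAI_API_VERSION",      # e.g. 2024-02-01
        "AZURE_OPENAI_API_KEY",          # assumption: picked up by AzureChatOpenAI
    ]
    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        raise SystemExit(f"Set these environment variables first: {missing}")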