50 changes: 26 additions & 24 deletions README.md
@@ -229,6 +229,7 @@ prompt = get_prompt_from_registry(<Registry ID>, options={"prompt_version": 1, "
```

### Opt out of tracing prompt and completion data

By default, prompt and completion data are captured. If you would like to opt out of it, set the following environment variable:

`TRACE_PROMPT_COMPLETION_DATA=false`
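
The same opt-out can be applied in-process; a minimal sketch, assuming the flag is read once when `langtrace.init()` runs:

```python
import os

# Assumption: TRACE_PROMPT_COMPLETION_DATA is read at init time, so it must be
# set before langtrace.init() is called.
os.environ["TRACE_PROMPT_COMPLETION_DATA"] = "false"

from langtrace_python_sdk import langtrace

langtrace.init()
```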
@@ -237,30 +238,31 @@ By default, prompt and completion data are captured. If you would like to opt ou

Langtrace automatically captures traces from the following vendors:

| Vendor | Type | Typescript SDK | Python SDK |
| ------------- | --------------- | ------------------ | ------------------------------- |
| OpenAI | LLM | :white_check_mark: | :white_check_mark: |
| Anthropic | LLM | :white_check_mark: | :white_check_mark: |
| Azure OpenAI | LLM | :white_check_mark: | :white_check_mark: |
| Cohere | LLM | :white_check_mark: | :white_check_mark: |
| Groq | LLM | :x: | :white_check_mark: |
| Perplexity | LLM | :white_check_mark: | :white_check_mark: |
| Gemini | LLM | :x: | :white_check_mark: |
| Mistral | LLM | :x: | :white_check_mark: |
| Langchain | Framework | :x: | :white_check_mark: |
| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
| Langgraph | Framework | :x: | :white_check_mark: |
| DSPy | Framework | :x: | :white_check_mark: |
| CrewAI | Framework | :x: | :white_check_mark: |
| Ollama | Framework | :x: | :white_check_mark: |
| VertexAI | Framework | :x: | :white_check_mark: |
| Vercel AI SDK | Framework | :white_check_mark: | :x: |
| EmbedChain | Framework | :x: | :white_check_mark: |
| Autogen | Framework | :x: | :white_check_mark: |
| Pinecone | Vector Database | :white_check_mark: | :white_check_mark: |
| ChromaDB | Vector Database | :white_check_mark: | :white_check_mark: |
| QDrant | Vector Database | :white_check_mark: | :white_check_mark: |
| Weaviate | Vector Database | :white_check_mark: | :white_check_mark: |
| PGVector | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |

---

8 changes: 8 additions & 0 deletions src/examples/autogen_example/__init__.py
@@ -0,0 +1,8 @@
from .main import main as autogen_main
from .main import comedy_show


class AutoGenRunner:
    def run(self):
        # autogen_main()  # alternative: the single-agent example from main.py
        comedy_show()
72 changes: 72 additions & 0 deletions src/examples/autogen_example/main.py
@@ -0,0 +1,72 @@
from langtrace_python_sdk import langtrace
from autogen import ConversableAgent
from dotenv import load_dotenv
from autogen.coding import LocalCommandLineCodeExecutor
import tempfile


load_dotenv()
langtrace.init(write_spans_to_console=False)
# agentops.init(api_key=os.getenv("AGENTOPS_API_KEY"))
# Create a temporary directory to store the code files.
temp_dir = tempfile.TemporaryDirectory()


# Create a local command line code executor.
executor = LocalCommandLineCodeExecutor(
    timeout=10,  # Timeout for each code execution, in seconds.
    work_dir=temp_dir.name,  # Use the temporary directory to store the code files.
)


def main():
    agent = ConversableAgent(
        "chatbot",
        llm_config={"config_list": [{"model": "gpt-4"}], "cache_seed": None},
        code_execution_config=False,  # Turn off code execution (off by default).
        function_map=None,  # No registered functions (defaults to None).
        human_input_mode="NEVER",  # Never ask for human input.
    )

    reply = agent.generate_reply(
        messages=[{"content": "Tell me a joke.", "role": "user"}]
    )
    return reply


def comedy_show():
    cathy = ConversableAgent(
        name="cathy",
        system_message="Your name is Cathy and you are a part of a duo of comedians.",
        llm_config={
            "config_list": [{"model": "gpt-4o-mini", "temperature": 0.9}],
            "cache_seed": None,
        },
        description="Cathy is a comedian",
        max_consecutive_auto_reply=10,
        code_execution_config={
            "executor": executor
        },  # Use the local command line code executor.
        function_map=None,
        chat_messages=None,
        silent=True,
        default_auto_reply="Sorry, I don't know what to say.",
        human_input_mode="NEVER",  # Never ask for human input.
    )

    joe = ConversableAgent(
        "joe",
        system_message="Your name is Joe and you are a part of a duo of comedians.",
        llm_config={
            "config_list": [{"model": "gpt-4o-mini", "temperature": 0.7}],
            "cache_seed": None,
        },
        human_input_mode="NEVER",  # Never ask for human input.
    )

    result = joe.initiate_chat(
        recipient=cathy, message="Cathy, tell me a joke.", max_turns=2
    )

    return result
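
A minimal way to exercise this example end to end (assuming the `src/examples` layout above is on `sys.path` and `OPENAI_API_KEY` is set in the environment):

```python
# Hypothetical driver script; the exact import path depends on how src/ is
# placed on sys.path in your checkout.
from examples.autogen_example import AutoGenRunner

# Runs comedy_show(): a two-turn chat between joe and cathy, with Autogen
# spans exported by Langtrace.
AutoGenRunner().run()
```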
1 change: 1 addition & 0 deletions src/langtrace_python_sdk/constants/instrumentation/common.py
@@ -31,6 +31,7 @@
"GEMINI": "Gemini",
"MISTRAL": "Mistral",
"EMBEDCHAIN": "Embedchain",
"AUTOGEN": "Autogen",
}

LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY = "langtrace_additional_attributes"
2 changes: 2 additions & 0 deletions src/langtrace_python_sdk/instrumentation/__init__.py
@@ -14,6 +14,7 @@
from .weaviate import WeaviateInstrumentation
from .ollama import OllamaInstrumentor
from .dspy import DspyInstrumentation
from .autogen import AutogenInstrumentation
from .vertexai import VertexAIInstrumentation
from .gemini import GeminiInstrumentation
from .mistral import MistralInstrumentation
@@ -37,6 +38,7 @@
"WeaviateInstrumentation",
"OllamaInstrumentor",
"DspyInstrumentation",
"AutogenInstrumentation",
"VertexAIInstrumentation",
"GeminiInstrumentation",
"MistralInstrumentation",
3 changes: 3 additions & 0 deletions src/langtrace_python_sdk/instrumentation/autogen/__init__.py
@@ -0,0 +1,3 @@
from .instrumentation import AutogenInstrumentation

__all__ = ["AutogenInstrumentation"]
42 changes: 42 additions & 0 deletions src/langtrace_python_sdk/instrumentation/autogen/instrumentation.py
@@ -0,0 +1,42 @@
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.trace import get_tracer
from wrapt import wrap_function_wrapper as _W
from importlib_metadata import version as v
from .patch import patch_generate_reply, patch_initiate_chat


class AutogenInstrumentation(BaseInstrumentor):
    def instrumentation_dependencies(self):
        return ["autogen >= 0.1.0"]

    def _instrument(self, **kwargs):
        print("Instrumenting autogen")
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        version = v("autogen")
        # Candidates for future patching:
        # conversable_agent.initiate_chat
        # conversable_agent.register_function
        # agent.Agent
        # AgentCreation
        # Tools --> register_for_llm, register_for_execution, register_for_function
        try:
            _W(
                module="autogen.agentchat.conversable_agent",
                name="ConversableAgent.initiate_chat",
                wrapper=patch_initiate_chat(
                    "conversable_agent.initiate_chat", version, tracer
                ),
            )

            _W(
                module="autogen.agentchat.conversable_agent",
                name="ConversableAgent.generate_reply",
                wrapper=patch_generate_reply(
                    "conversable_agent.generate_reply", version, tracer
                ),
            )
        except Exception:
            # Never let patching failures break the host application.
            pass

    def _uninstrument(self, **kwargs):
        pass
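
Normally `langtrace.init()` activates this instrumentor automatically (see `langtrace.py` below); for reference, a minimal sketch of driving it directly against a stock OpenTelemetry `TracerProvider`:

```python
from opentelemetry.sdk.trace import TracerProvider
from langtrace_python_sdk.instrumentation import AutogenInstrumentation

provider = TracerProvider()
# BaseInstrumentor.instrument() forwards kwargs to _instrument(), which wraps
# ConversableAgent.initiate_chat and ConversableAgent.generate_reply.
AutogenInstrumentation().instrument(tracer_provider=provider)
```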
132 changes: 132 additions & 0 deletions src/langtrace_python_sdk/instrumentation/autogen/patch.py
@@ -0,0 +1,132 @@
from langtrace_python_sdk.utils.llm import (
    get_langtrace_attributes,
    get_extra_attributes,
    get_span_name,
    set_span_attributes,
    get_llm_request_attributes,
    set_event_completion,
    set_usage_attributes,
)
from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
from langtrace.trace_attributes import FrameworkSpanAttributes
from opentelemetry.trace.status import Status, StatusCode
from langtrace.trace_attributes import SpanAttributes
from opentelemetry.trace import Tracer, SpanKind

from langtrace_python_sdk.utils import deduce_args_and_kwargs, set_span_attribute
import json


def patch_initiate_chat(name, version, tracer: Tracer):
    def traced_method(wrapped, instance, args, kwargs):
        all_params = deduce_args_and_kwargs(wrapped, *args, **kwargs)
        all_params["recipient"] = json.dumps(parse_agent(all_params.get("recipient")))
        span_attributes = {
            **get_langtrace_attributes(
                service_provider=SERVICE_PROVIDERS["AUTOGEN"],
                version=version,
                vendor_type="framework",
            ),
            "sender": json.dumps(parse_agent(instance)),
            **all_params,
        }
        attributes = FrameworkSpanAttributes(**span_attributes)

        with tracer.start_as_current_span(
            name=get_span_name(name), kind=SpanKind.CLIENT
        ) as span:
            try:
                set_span_attributes(span, attributes)
                result = wrapped(*args, **kwargs)
                # set_response_attributes(span, result)
                return result
            except Exception as err:
                # Record the exception in the span.
                span.record_exception(err)
                # Set the span status to indicate an error.
                span.set_status(Status(StatusCode.ERROR, str(err)))
                # Re-raise the exception to ensure it's not swallowed.
                raise

    return traced_method


def patch_generate_reply(name, version, tracer: Tracer):
    def traced_method(wrapped, instance, args, kwargs):
        llm_config = instance.llm_config
        kwargs = {
            **kwargs,
            **llm_config.get("config_list")[0],
        }
        service_provider = SERVICE_PROVIDERS["AUTOGEN"]

        span_attributes = {
            **get_langtrace_attributes(
                version=version,
                service_provider=service_provider,
                vendor_type="framework",
            ),
            **get_llm_request_attributes(
                kwargs,
                prompts=kwargs.get("messages"),
            ),
            **get_extra_attributes(),
        }
        attributes = FrameworkSpanAttributes(**span_attributes)

        with tracer.start_as_current_span(
            name=get_span_name(name), kind=SpanKind.CLIENT
        ) as span:
            try:
                result = wrapped(*args, **kwargs)

                # If caching is disabled, the underlying LLM client call is
                # instrumented separately, so skip duplicating its data here.
                if "cache_seed" in llm_config and llm_config.get("cache_seed") is None:
                    return result

                set_span_attributes(span, attributes)
                set_event_completion(span, [{"role": "assistant", "content": result}])
                # get_total_usage() returns {"total_cost": <float>, "<model>": {...}};
                # the second key is the responding model's name.
                _, response_model = list(instance.get_total_usage().keys())
                set_span_attribute(
                    span, SpanAttributes.LLM_RESPONSE_MODEL, response_model
                )
                set_usage_attributes(
                    span, instance.get_total_usage().get(response_model)
                )

                return result

            except Exception as err:
                # Record the exception in the span.
                span.record_exception(err)
                # Set the span status to indicate an error.
                span.set_status(Status(StatusCode.ERROR, str(err)))
                # Re-raise the exception to ensure it's not swallowed.
                raise

    return traced_method


def set_response_attributes(span, result):
    summary = getattr(result, "summary", None)
    if summary:
        set_span_attribute(span, "autogen.chat.summary", summary)


def parse_agent(agent):
    return {
        "name": getattr(agent, "name", None),
        "description": getattr(agent, "description", None),
        "system_message": str(getattr(agent, "system_message", None)),
        "silent": getattr(agent, "silent", None),
        "llm_config": str(getattr(agent, "llm_config", None)),
        "human_input_mode": getattr(agent, "human_input_mode", None),
    }
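
`parse_agent` flattens an agent into span-safe primitives. For the `cathy` agent from the example above, the serialized `sender`/`recipient` attribute would look roughly like this (illustrative values; note `llm_config` is stringified):

```python
{
    "name": "cathy",
    "description": "Cathy is a comedian",
    "system_message": "Your name is Cathy and you are a part of a duo of comedians.",
    "silent": True,
    "llm_config": "{'config_list': [{'model': 'gpt-4o-mini', 'temperature': 0.9}], 'cache_seed': None}",
    "human_input_mode": "NEVER",
}
```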
2 changes: 2 additions & 0 deletions src/langtrace_python_sdk/langtrace.py
@@ -52,6 +52,7 @@
    OpenAIInstrumentation,
    PineconeInstrumentation,
    QdrantInstrumentation,
    AutogenInstrumentation,
    VertexAIInstrumentation,
    WeaviateInstrumentation,
)
@@ -139,6 +140,7 @@ def init(
"google-cloud-aiplatform": VertexAIInstrumentation(),
"google-generativeai": GeminiInstrumentation(),
"mistralai": MistralInstrumentation(),
"autogen": AutogenInstrumentation(),
}

init_instrumentations(disable_instrumentations, all_instrumentations)
14 changes: 14 additions & 0 deletions src/langtrace_python_sdk/utils/__init__.py
@@ -2,6 +2,7 @@
from .sdk_version_checker import SDKVersionChecker
from opentelemetry.trace import Span
from langtrace.trace_attributes import SpanAttributes
import inspect
import os


@@ -28,6 +29,19 @@ def set_event_prompt(span: Span, prompt):
    )


def deduce_args_and_kwargs(func, *args, **kwargs):
    sig = inspect.signature(func)
    bound_args = sig.bind(*args, **kwargs)
    bound_args.apply_defaults()

    all_params = {}
    for param_name in sig.parameters:
        if param_name in bound_args.arguments:
            all_params[param_name] = bound_args.arguments[param_name]

    return all_params


def check_if_sdk_is_outdated():
    SDKVersionChecker().check()
    return
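
A quick sketch of what `deduce_args_and_kwargs` yields for a hypothetical function, with defaults applied via signature binding:

```python
def greet(name, greeting="hi"):  # hypothetical function, for illustration only
    return f"{greeting}, {name}"

deduce_args_and_kwargs(greet, "cathy")
# -> {"name": "cathy", "greeting": "hi"}
```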