diff --git a/README.md b/README.md
index 30d3e6cd..e2509029 100644
--- a/README.md
+++ b/README.md
@@ -229,6 +229,7 @@ prompt = get_prompt_from_registry(, options={"prompt_version": 1, "
 ```
 
 ### Opt out of tracing prompt and completion data
+
 By default, prompt and completion data are captured. If you would like to opt out of it, set the following env var,
 
 `TRACE_PROMPT_COMPLETION_DATA=false`
@@ -237,30 +238,31 @@ By default, prompt and completion data are captured. If you would like to opt ou
 
 Langtrace automatically captures traces from the following vendors:
 
-| Vendor       | Type            | Typescript SDK     | Python SDK                      |
-| ------------ | --------------- | ------------------ | ------------------------------- |
-| OpenAI       | LLM             | :white_check_mark: | :white_check_mark:              |
-| Anthropic    | LLM             | :white_check_mark: | :white_check_mark:              |
-| Azure OpenAI | LLM             | :white_check_mark: | :white_check_mark:              |
-| Cohere       | LLM             | :white_check_mark: | :white_check_mark:              |
-| Groq         | LLM             | :x:                | :white_check_mark:              |
-| Perplexity   | LLM             | :white_check_mark: | :white_check_mark:              |
-| Gemini       | LLM             | :x:                | :white_check_mark:              |
-| Mistral      | LLM             | :x:                | :white_check_mark:              |
-| Langchain    | Framework       | :x:                | :white_check_mark:              |
-| LlamaIndex   | Framework       | :white_check_mark: | :white_check_mark:              |
-| Langgraph    | Framework       | :x:                | :white_check_mark:              |
-| DSPy         | Framework       | :x:                | :white_check_mark:              |
-| CrewAI       | Framework       | :x:                | :white_check_mark:              |
-| Ollama       | Framework       | :x:                | :white_check_mark:              |
-| VertexAI     | Framework       | :x:                | :white_check_mark:              |
-| Vercel AI SDK| Framework       | :white_check_mark: | :x:                             |
-| EmbedChain   | Framework       | :x:                | :white_check_mark:              |
-| Pinecone     | Vector Database | :white_check_mark: | :white_check_mark:              |
-| ChromaDB     | Vector Database | :white_check_mark: | :white_check_mark:              |
-| QDrant       | Vector Database | :white_check_mark: | :white_check_mark:              |
-| Weaviate     | Vector Database | :white_check_mark: | :white_check_mark:              |
-| PGVector     | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |
+| Vendor        | Type            | Typescript SDK     | Python SDK                      |
+| ------------- | --------------- | ------------------ | ------------------------------- |
+| OpenAI        | LLM             | :white_check_mark: | :white_check_mark:              |
+| Anthropic     | LLM             | :white_check_mark: | :white_check_mark:              |
+| Azure OpenAI  | LLM             | :white_check_mark: | :white_check_mark:              |
+| Cohere        | LLM             | :white_check_mark: | :white_check_mark:              |
+| Groq          | LLM             | :x:                | :white_check_mark:              |
+| Perplexity    | LLM             | :white_check_mark: | :white_check_mark:              |
+| Gemini        | LLM             | :x:                | :white_check_mark:              |
+| Mistral       | LLM             | :x:                | :white_check_mark:              |
+| Langchain     | Framework       | :x:                | :white_check_mark:              |
+| LlamaIndex    | Framework       | :white_check_mark: | :white_check_mark:              |
+| Langgraph     | Framework       | :x:                | :white_check_mark:              |
+| DSPy          | Framework       | :x:                | :white_check_mark:              |
+| CrewAI        | Framework       | :x:                | :white_check_mark:              |
+| Ollama        | Framework       | :x:                | :white_check_mark:              |
+| VertexAI      | Framework       | :x:                | :white_check_mark:              |
+| Vercel AI SDK | Framework       | :white_check_mark: | :x:                             |
+| EmbedChain    | Framework       | :x:                | :white_check_mark:              |
+| Autogen       | Framework       | :x:                | :white_check_mark:              |
+| Pinecone      | Vector Database | :white_check_mark: | :white_check_mark:              |
+| ChromaDB      | Vector Database | :white_check_mark: | :white_check_mark:              |
+| QDrant        | Vector Database | :white_check_mark: | :white_check_mark:              |
+| Weaviate      | Vector Database | :white_check_mark: | :white_check_mark:              |
+| PGVector      | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |
 
 ---
 
diff --git a/src/examples/autogen_example/__init__.py b/src/examples/autogen_example/__init__.py
new file mode 100644
index 00000000..e04fb9ef
--- /dev/null
+++ b/src/examples/autogen_example/__init__.py
@@ -0,0 +1,8 @@
+from .main import main as autogen_main
+from .main import comedy_show
+
+
+class AutoGenRunner:
+    def run(self):
+        # autogen_main()
+        comedy_show()
diff --git a/src/examples/autogen_example/main.py b/src/examples/autogen_example/main.py
new file mode 100644
index 00000000..9ed54c17
--- /dev/null
+++ b/src/examples/autogen_example/main.py
@@ -0,0 +1,72 @@
+from langtrace_python_sdk import langtrace
+from autogen import ConversableAgent
+from dotenv import load_dotenv
+from autogen.coding import LocalCommandLineCodeExecutor
+import tempfile
+
+
+load_dotenv()
+langtrace.init(write_spans_to_console=False)
+# agentops.init(api_key=os.getenv("AGENTOPS_API_KEY"))
+# Create a temporary directory to store the code files.
+temp_dir = tempfile.TemporaryDirectory()
+
+
+# Create a local command line code executor.
+executor = LocalCommandLineCodeExecutor(
+    timeout=10,  # Timeout for each code execution in seconds.
+    work_dir=temp_dir.name,  # Use the temporary directory to store the code files.
+)
+
+
+def main():
+
+    agent = ConversableAgent(
+        "chatbot",
+        llm_config={"config_list": [{"model": "gpt-4"}], "cache_seed": None},
+        code_execution_config=False,  # Turn off code execution, by default it is off.
+        function_map=None,  # No registered functions, by default it is None.
+        human_input_mode="NEVER",  # Never ask for human input.
+    )
+
+    reply = agent.generate_reply(
+        messages=[{"content": "Tell me a joke.", "role": "user"}]
+    )
+    return reply
+
+
+def comedy_show():
+    cathy = ConversableAgent(
+        name="cathy",
+        system_message="Your name is Cathy and you are a part of a duo of comedians.",
+        llm_config={
+            "config_list": [{"model": "gpt-4o-mini", "temperature": 0.9}],
+            "cache_seed": None,
+        },
+        description="Cathy is a comedian",
+        max_consecutive_auto_reply=10,
+        code_execution_config={
+            "executor": executor
+        },  # Use the local command line code executor.
+        function_map=None,
+        chat_messages=None,
+        silent=True,
+        default_auto_reply="Sorry, I don't know what to say.",
+        human_input_mode="NEVER",  # Never ask for human input.
+    )
+
+    joe = ConversableAgent(
+        "joe",
+        system_message="Your name is Joe and you are a part of a duo of comedians.",
+        llm_config={
+            "config_list": [{"model": "gpt-4o-mini", "temperature": 0.7}],
+            "cache_seed": None,
+        },
+        human_input_mode="NEVER",  # Never ask for human input.
+    )
+
+    result = joe.initiate_chat(
+        recipient=cathy, message="Cathy, tell me a joke.", max_turns=2
+    )
+
+    return result
diff --git a/src/langtrace_python_sdk/constants/instrumentation/common.py b/src/langtrace_python_sdk/constants/instrumentation/common.py
index 85710ba2..70d92a1b 100644
--- a/src/langtrace_python_sdk/constants/instrumentation/common.py
+++ b/src/langtrace_python_sdk/constants/instrumentation/common.py
@@ -31,6 +31,7 @@
     "GEMINI": "Gemini",
     "MISTRAL": "Mistral",
     "EMBEDCHAIN": "Embedchain",
+    "AUTOGEN": "Autogen",
 }
 
 LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY = "langtrace_additional_attributes"
diff --git a/src/langtrace_python_sdk/instrumentation/__init__.py b/src/langtrace_python_sdk/instrumentation/__init__.py
index b0e5fd26..984541dc 100644
--- a/src/langtrace_python_sdk/instrumentation/__init__.py
+++ b/src/langtrace_python_sdk/instrumentation/__init__.py
@@ -14,6 +14,7 @@
 from .weaviate import WeaviateInstrumentation
 from .ollama import OllamaInstrumentor
 from .dspy import DspyInstrumentation
+from .autogen import AutogenInstrumentation
 from .vertexai import VertexAIInstrumentation
 from .gemini import GeminiInstrumentation
 from .mistral import MistralInstrumentation
@@ -37,6 +38,7 @@
     "WeaviateInstrumentation",
     "OllamaInstrumentor",
     "DspyInstrumentation",
+    "AutogenInstrumentation",
     "VertexAIInstrumentation",
     "GeminiInstrumentation",
     "MistralInstrumentation",
diff --git a/src/langtrace_python_sdk/instrumentation/autogen/__init__.py b/src/langtrace_python_sdk/instrumentation/autogen/__init__.py
new file mode 100644
index 00000000..c152b011
--- /dev/null
+++ b/src/langtrace_python_sdk/instrumentation/autogen/__init__.py
@@ -0,0 +1,3 @@
+from .instrumentation import AutogenInstrumentation
+
+__all__ = ["AutogenInstrumentation"]
diff --git a/src/langtrace_python_sdk/instrumentation/autogen/instrumentation.py b/src/langtrace_python_sdk/instrumentation/autogen/instrumentation.py
new file mode 100644
index 00000000..06f89824
--- /dev/null
+++ b/src/langtrace_python_sdk/instrumentation/autogen/instrumentation.py
@@ -0,0 +1,42 @@
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.trace import get_tracer
+from wrapt import wrap_function_wrapper as _W
+from importlib_metadata import version as v
+from .patch import patch_generate_reply, patch_initiate_chat
+
+
+class AutogenInstrumentation(BaseInstrumentor):
+    def instrumentation_dependencies(self):
+        return ["autogen >= 0.1.0"]
+
+    def _instrument(self, **kwargs):
+        print("Instrumenting autogen")
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, "", tracer_provider)
+        version = v("autogen")
+        # conversable_agent.initiate_chat
+        # conversable_agent.register_function
+        # agent.Agent
+        # AgentCreation
+        # Tools --> Register_for_llm, register_for_execution, register_for_function
+        try:
+            _W(
+                module="autogen.agentchat.conversable_agent",
+                name="ConversableAgent.initiate_chat",
+                wrapper=patch_initiate_chat(
+                    "conversable_agent.initiate_chat", version, tracer
+                ),
+            )
+
+            _W(
+                module="autogen.agentchat.conversable_agent",
+                name="ConversableAgent.generate_reply",
+                wrapper=patch_generate_reply(
+                    "conversable_agent.generate_reply", version, tracer
+                ),
+            )
+        except Exception as e:
+            pass
+
+    def _uninstrument(self, **kwargs):
+        pass
diff --git a/src/langtrace_python_sdk/instrumentation/autogen/patch.py b/src/langtrace_python_sdk/instrumentation/autogen/patch.py
new file mode 100644
index 00000000..ce6c7ff2
--- /dev/null
+++ b/src/langtrace_python_sdk/instrumentation/autogen/patch.py
@@ -0,0 +1,132 @@
+from langtrace_python_sdk.utils.llm import (
+    get_langtrace_attributes,
+    get_extra_attributes,
+    get_span_name,
+    set_span_attributes,
+    get_llm_request_attributes,
+    set_event_completion,
+    set_usage_attributes,
+)
+from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
+from langtrace.trace_attributes import FrameworkSpanAttributes
+from opentelemetry.trace.status import Status, StatusCode
+from langtrace.trace_attributes import SpanAttributes
+from opentelemetry.trace import Tracer, SpanKind
+
+from langtrace_python_sdk.utils import deduce_args_and_kwargs, set_span_attribute
+import json
+
+
+def patch_initiate_chat(name, version, tracer: Tracer):
+    def traced_method(wrapped, instance, args, kwargs):
+        all_params = deduce_args_and_kwargs(wrapped, *args, **kwargs)
+        all_params["recipient"] = json.dumps(parse_agent(all_params.get("recipient")))
+        span_attributes = {
+            **get_langtrace_attributes(
+                service_provider=SERVICE_PROVIDERS["AUTOGEN"],
+                version=version,
+                vendor_type="framework",
+            ),
+            "sender": json.dumps(parse_agent(instance)),
+            **all_params,
+        }
+        attributes = FrameworkSpanAttributes(**span_attributes)
+
+        with tracer.start_as_current_span(
+            name=get_span_name(name), kind=SpanKind.CLIENT
+        ) as span:
+            try:
+                set_span_attributes(span, attributes)
+                result = wrapped(*args, **kwargs)
+                # set_response_attributes(span, result)
+                return result
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def patch_generate_reply(name, version, tracer: Tracer):
+
+    def traced_method(wrapped, instance, args, kwargs):
+
+        llm_config = instance.llm_config
+        kwargs = {
+            **kwargs,
+            **llm_config.get("config_list")[0],
+        }
+        service_provider = SERVICE_PROVIDERS["AUTOGEN"]
+
+        span_attributes = {
+            **get_langtrace_attributes(
+                version=version,
+                service_provider=service_provider,
+                vendor_type="framework",
+            ),
+            **get_llm_request_attributes(
+                kwargs,
+                prompts=kwargs.get("messages"),
+            ),
+            **get_extra_attributes(),
+        }
+        attributes = FrameworkSpanAttributes(**span_attributes)
+
+        with tracer.start_as_current_span(
+            name=get_span_name(name), kind=SpanKind.CLIENT
+        ) as span:
+            try:
+
+                result = wrapped(*args, **kwargs)
+
+                # if caching is disabled, return result as langtrace will instrument the rest.
+                if "cache_seed" in llm_config and llm_config.get("cache_seed") is None:
+                    return result
+
+                set_span_attributes(span, attributes)
+                set_event_completion(span, [{"role": "assistant", "content": result}])
+                total_cost, response_model = list(instance.get_total_usage().keys())
+                set_span_attribute(
+                    span, SpanAttributes.LLM_RESPONSE_MODEL, response_model
+                )
+                set_usage_attributes(
+                    span, instance.get_total_usage().get(response_model)
+                )
+
+                return result
+
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def set_response_attributes(span, result):
+    summary = getattr(result, "summary", None)
+    if summary:
+        set_span_attribute(span, "autogen.chat.summary", summary)
+
+
+def parse_agent(agent):
+
+    return {
+        "name": getattr(agent, "name", None),
+        "description": getattr(agent, "description", None),
+        "system_message": str(getattr(agent, "system_message", None)),
+        "silent": getattr(agent, "silent", None),
+        "llm_config": str(getattr(agent, "llm_config", None)),
+        "human_input_mode": getattr(agent, "human_input_mode", None),
+    }
diff --git a/src/langtrace_python_sdk/langtrace.py b/src/langtrace_python_sdk/langtrace.py
index 7be1bf9f..738c418b 100644
--- a/src/langtrace_python_sdk/langtrace.py
+++ b/src/langtrace_python_sdk/langtrace.py
@@ -52,6 +52,7 @@
     OpenAIInstrumentation,
     PineconeInstrumentation,
     QdrantInstrumentation,
+    AutogenInstrumentation,
     VertexAIInstrumentation,
     WeaviateInstrumentation,
 )
@@ -139,6 +140,7 @@ def init(
         "google-cloud-aiplatform": VertexAIInstrumentation(),
         "google-generativeai": GeminiInstrumentation(),
         "mistralai": MistralInstrumentation(),
+        "autogen": AutogenInstrumentation(),
     }
 
     init_instrumentations(disable_instrumentations, all_instrumentations)
diff --git a/src/langtrace_python_sdk/utils/__init__.py b/src/langtrace_python_sdk/utils/__init__.py
index df6925f3..19fcab32 100644
--- a/src/langtrace_python_sdk/utils/__init__.py
+++ b/src/langtrace_python_sdk/utils/__init__.py
@@ -2,6 +2,7 @@
 from .sdk_version_checker import SDKVersionChecker
 from opentelemetry.trace import Span
 from langtrace.trace_attributes import SpanAttributes
+import inspect
 import os
 
 
@@ -28,6 +29,19 @@ def set_event_prompt(span: Span, prompt):
     )
 
 
+def deduce_args_and_kwargs(func, *args, **kwargs):
+    sig = inspect.signature(func)
+    bound_args = sig.bind(*args, **kwargs)
+    bound_args.apply_defaults()
+
+    all_params = {}
+    for param_name, param in sig.parameters.items():
+        if param_name in bound_args.arguments:
+            all_params[param_name] = bound_args.arguments[param_name]
+
+    return all_params
+
+
 def check_if_sdk_is_outdated():
     SDKVersionChecker().check()
     return
diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index 21594e58..6d9647e7 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -126,7 +126,9 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
     tools = kwargs.get("tools", None)
     return {
         SpanAttributes.LLM_OPERATION_NAME: operation_name,
-        SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model") or "gpt-3.5-turbo",
+        SpanAttributes.LLM_REQUEST_MODEL: model
+        or kwargs.get("model")
+        or "gpt-3.5-turbo",
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
         SpanAttributes.LLM_TOP_K: top_k,
@@ -230,7 +232,15 @@ def set_event_completion(span: Span, result_content):
 
 
 def set_span_attributes(span: Span, attributes: Any) -> None:
-    for field, value in attributes.model_dump(by_alias=True).items():
+    from pydantic import BaseModel
+
+    attrs = (
+        attributes.model_dump(by_alias=True)
+        if isinstance(attributes, BaseModel)
+        else attributes
+    )
+
+    for field, value in attrs.items():
         set_span_attribute(span, field, value)
 
 
diff --git a/src/run_example.py b/src/run_example.py
index cddd1e42..864bc8c2 100644
--- a/src/run_example.py
+++ b/src/run_example.py
@@ -2,11 +2,11 @@
 
 ENABLED_EXAMPLES = {
     "anthropic": False,
-    "azureopenai": True,
+    "azureopenai": False,
     "chroma": False,
     "cohere": False,
     "fastapi": False,
-    "langchain": True,
+    "langchain": False,
     "llamaindex": False,
     "hiveagent": False,
     "openai": False,
@@ -16,6 +16,7 @@
     "weaviate": False,
     "ollama": False,
     "groq": False,
+    "autogen": True,
     "vertexai": False,
     "gemini": False,
     "mistral": False,
@@ -93,6 +94,11 @@
     print(Fore.BLUE + "Running Groq example" + Fore.RESET)
     GroqRunner().run()
 
+if ENABLED_EXAMPLES["autogen"]:
+    from examples.autogen_example import AutoGenRunner
+
+    print(Fore.BLUE + "Running Autogen example" + Fore.RESET)
+    AutoGenRunner().run()
 
 if ENABLED_EXAMPLES["vertexai"]:
     from examples.vertexai_example import VertexAIRunner
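
A minimal usage sketch of the new instrumentation, mirroring `src/examples/autogen_example/main.py` above. The `gpt-4o-mini` model name and the presence of `OPENAI_API_KEY` in the environment are assumptions for illustration, not part of the patch:

```python
from langtrace_python_sdk import langtrace
from autogen import ConversableAgent

# Initializing Langtrace patches ConversableAgent.initiate_chat and
# ConversableAgent.generate_reply via the new AutogenInstrumentation.
langtrace.init(write_spans_to_console=False)

agent = ConversableAgent(
    "chatbot",
    llm_config={"config_list": [{"model": "gpt-4o-mini"}], "cache_seed": None},
    human_input_mode="NEVER",
)

# This call is wrapped by patch_generate_reply, which records the request
# attributes, token usage, and the completion event on the span.
reply = agent.generate_reply(messages=[{"content": "Tell me a joke.", "role": "user"}])
print(reply)
```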
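The `initiate_chat` patch leans on the new `deduce_args_and_kwargs` helper to record call parameters by name. A small sketch of its behavior, using a hypothetical stand-in function rather than the real `ConversableAgent.initiate_chat`:

```python
from langtrace_python_sdk.utils import deduce_args_and_kwargs


def initiate_chat(self, recipient, message=None, max_turns=None):
    # Hypothetical stand-in with a similar signature, for illustration only.
    ...


# Positional and keyword arguments are bound to the signature and returned
# as a name -> value mapping, which the span attributes are built from.
params = deduce_args_and_kwargs(
    initiate_chat, "sender_agent", "cathy", message="Tell me a joke.", max_turns=2
)
print(params)
# {'self': 'sender_agent', 'recipient': 'cathy', 'message': 'Tell me a joke.', 'max_turns': 2}
```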