From 390039dfa69a88037038a7d9881153005d3684a4 Mon Sep 17 00:00:00 2001
From: Karthik Kalyanaraman
Date: Wed, 24 Jul 2024 13:02:42 -0700
Subject: [PATCH 1/5] Add OTLP example

---
 .../ollama_example/basic_example_2.py    | 34 -----------
 .../otlp_example/otlp_with_langtrace.py  | 59 +++++++++++++++++++
 2 files changed, 59 insertions(+), 34 deletions(-)
 delete mode 100644 src/examples/ollama_example/basic_example_2.py
 create mode 100644 src/examples/otlp_example/otlp_with_langtrace.py

diff --git a/src/examples/ollama_example/basic_example_2.py b/src/examples/ollama_example/basic_example_2.py
deleted file mode 100644
index e9ecb7df..00000000
--- a/src/examples/ollama_example/basic_example_2.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from langtrace_python_sdk import langtrace
-from openai import OpenAI
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
-
-service_name = "langtrace-python-ollama"
-otlp_endpoint = "http://localhost:4318/v1/traces"
-otlp_exporter = OTLPSpanExporter(
-    endpoint=otlp_endpoint,
-    headers=(("Content-Type", "application/json"),))
-langtrace.init(custom_remote_exporter=otlp_exporter, batch=False)
-
-
-def chat_with_ollama():
-    # Use the OpenAI endpoint, not the Ollama API.
-    base_url = "http://localhost:11434/v1"
-    client = OpenAI(base_url=base_url, api_key="unused")
-    messages = [
-        {
-            "role": "user",
-            "content": "Hello, I'm a human.",
-        },
-    ]
-    chat_completion = client.chat.completions.create(
-        model="llama3", messages=messages
-    )
-    print(chat_completion.choices[0].message.content)
-
-
-def main():
-    chat_with_ollama()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/examples/otlp_example/otlp_with_langtrace.py b/src/examples/otlp_example/otlp_with_langtrace.py
new file mode 100644
index 00000000..b880fce6
--- /dev/null
+++ b/src/examples/otlp_example/otlp_with_langtrace.py
@@ -0,0 +1,59 @@
+# Instructions
+# 1. Run the OpenTelemetry Collector with the OTLP receiver enabled
+# Create otel-config.yaml with the following content:
+# receivers:
+#   otlp:
+#     protocols:
+#       grpc:
+#         endpoint: "0.0.0.0:4317"
+#       http:
+#         endpoint: "0.0.0.0:4318"
+
+# exporters:
+#   logging:
+#     loglevel: debug
+
+# service:
+#   pipelines:
+#     traces:
+#       receivers: [otlp]
+#       exporters: [logging]
+# docker pull otel/opentelemetry-collector:latest
+# docker run --rm -p 4317:4317 -p 4318:4318 -v $(pwd)/otel-config.yaml:/otel-config.yaml otel/opentelemetry-collector --config otel-config.yaml
+# 2. Run the following code
+
+from langtrace_python_sdk import langtrace
+from openai import OpenAI
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+
+
+# Configure the OTLP exporter to use the correct endpoint and API key
+otlp_endpoint = "http://localhost:4318/v1/traces"
+otlp_exporter = OTLPSpanExporter(
+    endpoint=otlp_endpoint,
+    headers=(("Content-Type", "application/json"),))
+langtrace.init(custom_remote_exporter=otlp_exporter, batch=False)
+
+
+def chat_with_openai():
+    client = OpenAI(api_key="sk-<redacted>")
+    messages = [
+        {
+            "role": "user",
+            "content": "Hello, I'm a human.",
+        },
+    ]
+    chat_completion = client.chat.completions.create(
+        messages=messages,
+        stream=False,
+        model="gpt-3.5-turbo",
+    )
+    print(chat_completion.choices[0].message.content)
+
+
+def main():
+    chat_with_openai()
+
+
+if __name__ == "__main__":
+    main()

From 6623f20e79503fa55dd22f2976b026db17c79250 Mon Sep 17 00:00:00 2001
From: Karthik Kalyanaraman <105607645+karthikscale3@users.noreply.github.com>
Date: Wed, 24 Jul 2024 13:05:38 -0700
Subject: [PATCH 2/5] Add OTLP example (#259)

---
 .../ollama_example/basic_example_2.py    | 34 -----------
 .../otlp_example/otlp_with_langtrace.py  | 59 +++++++++++++++++++
 2 files changed, 59 insertions(+), 34 deletions(-)
 delete mode 100644 src/examples/ollama_example/basic_example_2.py
 create mode 100644 src/examples/otlp_example/otlp_with_langtrace.py

diff --git a/src/examples/ollama_example/basic_example_2.py b/src/examples/ollama_example/basic_example_2.py
deleted file mode 100644
index e9ecb7df..00000000
--- a/src/examples/ollama_example/basic_example_2.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from langtrace_python_sdk import langtrace
-from openai import OpenAI
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
-
-service_name = "langtrace-python-ollama"
-otlp_endpoint = "http://localhost:4318/v1/traces"
-otlp_exporter = OTLPSpanExporter(
-    endpoint=otlp_endpoint,
-    headers=(("Content-Type", "application/json"),))
-langtrace.init(custom_remote_exporter=otlp_exporter, batch=False)
-
-
-def chat_with_ollama():
-    # Use the OpenAI endpoint, not the Ollama API.
-    base_url = "http://localhost:11434/v1"
-    client = OpenAI(base_url=base_url, api_key="unused")
-    messages = [
-        {
-            "role": "user",
-            "content": "Hello, I'm a human.",
-        },
-    ]
-    chat_completion = client.chat.completions.create(
-        model="llama3", messages=messages
-    )
-    print(chat_completion.choices[0].message.content)
-
-
-def main():
-    chat_with_ollama()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/examples/otlp_example/otlp_with_langtrace.py b/src/examples/otlp_example/otlp_with_langtrace.py
new file mode 100644
index 00000000..52851dfa
--- /dev/null
+++ b/src/examples/otlp_example/otlp_with_langtrace.py
@@ -0,0 +1,59 @@
+# Instructions
+# 1. Run the OpenTelemetry Collector with the OTLP receiver enabled
+# Create otel-config.yaml with the following content:
+# receivers:
+#   otlp:
+#     protocols:
+#       grpc:
+#         endpoint: "0.0.0.0:4317"
+#       http:
+#         endpoint: "0.0.0.0:4318"
+
+# exporters:
+#   logging:
+#     loglevel: debug
+
+# service:
+#   pipelines:
+#     traces:
+#       receivers: [otlp]
+#       exporters: [logging]
+# docker pull otel/opentelemetry-collector:latest
+# docker run --rm -p 4317:4317 -p 4318:4318 -v $(pwd)/otel-config.yaml:/otel-config.yaml otel/opentelemetry-collector --config otel-config.yaml
+# 2. Run the following code
+
+from langtrace_python_sdk import langtrace
+from openai import OpenAI
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+
+
+# Configure the OTLP exporter to use the correct endpoint and API key
+otlp_endpoint = "http://localhost:4318/v1/traces"
+otlp_exporter = OTLPSpanExporter(
+    endpoint=otlp_endpoint,
+    headers=(("Content-Type", "application/json"),))
+langtrace.init(custom_remote_exporter=otlp_exporter, batch=False)
+
+
+def chat_with_openai():
+    client = OpenAI()
+    messages = [
+        {
+            "role": "user",
+            "content": "Hello, I'm a human.",
+        },
+    ]
+    chat_completion = client.chat.completions.create(
+        messages=messages,
+        stream=False,
+        model="gpt-3.5-turbo",
+    )
+    print(chat_completion.choices[0].message.content)
+
+
+def main():
+    chat_with_openai()
+
+
+if __name__ == "__main__":
+    main()

From 379e22d82e04f8ae462203605da534aa2c16d49e Mon Sep 17 00:00:00 2001
From: Ali Waleed <134522290+alizenhom@users.noreply.github.com>
Date: Thu, 25 Jul 2024 00:23:19 +0300
Subject: [PATCH 3/5] Adhere to otel span attributes (#257)

* adhere to otel's semconv

* lock attributes version

* lock to 7.0.0 trace attributes

* bump version

---------

Co-authored-by: Karthik Kalyanaraman

---
 pyproject.toml                         |  2 +-
 .../instrumentation/cohere/patch.py    |  4 ++--
 .../instrumentation/openai/patch.py    | 10 +++++-----
 src/langtrace_python_sdk/utils/llm.py  |  3 ++-
 src/langtrace_python_sdk/version.py    |  2 +-
 5 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 61bc1d8a..f5ea9d76 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,7 +18,7 @@ classifiers=[
   "Operating System :: OS Independent",
 ]
 dependencies = [
-  'trace-attributes>=6.0.3,<7.0.0',
+  'trace-attributes==7.0.0',
   'opentelemetry-api>=1.25.0',
   'opentelemetry-sdk>=1.25.0',
   'opentelemetry-instrumentation>=0.46b0',
diff --git a/src/langtrace_python_sdk/instrumentation/cohere/patch.py b/src/langtrace_python_sdk/instrumentation/cohere/patch.py
index 7fe72376..e3e26dc1 100644
--- a/src/langtrace_python_sdk/instrumentation/cohere/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/cohere/patch.py
@@ -44,7 +44,7 @@ def traced_method(wrapped, instance, args, kwargs):

         span_attributes = {
             **get_langtrace_attributes(version, service_provider),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="rerank"),
             **get_llm_url(instance),
             SpanAttributes.LLM_REQUEST_MODEL: kwargs.get("model") or "command-r-plus",
             SpanAttributes.LLM_URL: APIS["RERANK"]["URL"],
@@ -121,7 +121,7 @@ def traced_method(wrapped, instance, args, kwargs):

         span_attributes = {
             **get_langtrace_attributes(version, service_provider),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="embed"),
             **get_llm_url(instance),
             SpanAttributes.LLM_URL: APIS["EMBED"]["URL"],
             SpanAttributes.LLM_PATH: APIS["EMBED"]["ENDPOINT"],
diff --git a/src/langtrace_python_sdk/instrumentation/openai/patch.py b/src/langtrace_python_sdk/instrumentation/openai/patch.py
index b5d7ff52..a70ca630 100644
--- a/src/langtrace_python_sdk/instrumentation/openai/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/openai/patch.py
@@ -55,7 +55,7 @@ def traced_method(wrapped, instance, args, kwargs):
         service_provider = SERVICE_PROVIDERS["OPENAI"]
         span_attributes = {
             **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="images_generate"),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
             **get_extra_attributes(),
@@ -118,7 +118,7 @@ async def traced_method(wrapped, instance, args, kwargs):

         span_attributes = {
             **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="images_generate"),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
             **get_extra_attributes(),
@@ -181,7 +181,7 @@ def traced_method(wrapped, instance, args, kwargs):

         span_attributes = {
             **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="images_edit"),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: APIS["IMAGES_EDIT"]["ENDPOINT"],
             SpanAttributes.LLM_RESPONSE_FORMAT: kwargs.get("response_format"),
@@ -432,7 +432,7 @@ def traced_method(wrapped, instance, args, kwargs):

         span_attributes = {
             **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="embed"),
             **get_llm_url(instance),
             SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
             SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
@@ -490,7 +490,7 @@ async def traced_method(wrapped, instance, args, kwargs):

         span_attributes = {
             **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
-            **get_llm_request_attributes(kwargs),
+            **get_llm_request_attributes(kwargs, operation_name="embed"),
             SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
             SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
             **get_extra_attributes(),
diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index 4aefc912..bb00d18f 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -92,7 +92,7 @@ def get_langtrace_attributes(version, service_provider, vendor_type="llm"):
     }


-def get_llm_request_attributes(kwargs, prompts=None, model=None):
+def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name="chat"):
     user = kwargs.get("user", None)

     if prompts is None:
@@ -111,6 +111,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None):
     top_p = kwargs.get("p", None) or kwargs.get("top_p", None)
     tools = kwargs.get("tools", None)
     return {
+        SpanAttributes.LLM_OPERATION_NAME: operation_name,
         SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
         SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
         SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index a7ecb802..90a1f38f 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.2.6"
+__version__ = "2.2.7"

From 4cc4799fbfa250daf28ccde198c6ef65f9871909 Mon Sep 17 00:00:00 2001
From: Karthik Kalyanaraman <105607645+karthikscale3@users.noreply.github.com>
Date: Tue, 30 Jul 2024 17:18:59 -0700
Subject: [PATCH 4/5] Bump opentelemetry dependency (#269)

* Bump opentelemetry-instrumentation>=0.47b0

* Bump version

---
 pyproject.toml                      | 2 +-
 src/langtrace_python_sdk/version.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index f5ea9d76..c6c926d2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,7 @@ dependencies = [
   'trace-attributes==7.0.0',
   'opentelemetry-api>=1.25.0',
   'opentelemetry-sdk>=1.25.0',
-  'opentelemetry-instrumentation>=0.46b0',
+  'opentelemetry-instrumentation>=0.47b0',
   'opentelemetry-instrumentation-sqlalchemy>=0.46b0',
   'opentelemetry-exporter-otlp-proto-http>=1.25.0',
   'opentelemetry-exporter-otlp-proto-grpc>=1.25.0',
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index 7abc065e..aed1580f 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.2.12"
+__version__ = "2.2.13"

From abf38d733c0a83831f6d84f084ffe1845fd6a635 Mon Sep 17 00:00:00 2001
From: darshit-s3 <119623510+darshit-s3@users.noreply.github.com>
Date: Wed, 31 Jul 2024 11:44:13 -0700
Subject: [PATCH 5/5] Fix gemini sdk version add (#271)

* fix: generativeai sdk version fetch for gemini

* chore: bump version for gemini sdk version fix

---
 .../instrumentation/gemini/instrumentation.py | 2 +-
 src/langtrace_python_sdk/version.py           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/langtrace_python_sdk/instrumentation/gemini/instrumentation.py b/src/langtrace_python_sdk/instrumentation/gemini/instrumentation.py
index 675a049d..e98b1ede 100644
--- a/src/langtrace_python_sdk/instrumentation/gemini/instrumentation.py
+++ b/src/langtrace_python_sdk/instrumentation/gemini/instrumentation.py
@@ -14,7 +14,7 @@ def instrumentation_dependencies(self) -> Collection[str]:
     def _instrument(self, **kwargs):
         trace_provider = kwargs.get("tracer_provider")
         tracer = get_tracer(__name__, "", trace_provider)
-        version = v("google-cloud-aiplatform")
+        version = v("google-generativeai")

         for _, api_config in APIS.items():
             module = api_config.get("module")
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index aed1580f..223af0cb 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "2.2.13"
+__version__ = "2.2.14"
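
A note for reviewers on PATCH 5: google-generativeai and google-cloud-aiplatform are separate Python distributions, so querying the wrong name reports an unrelated version, or raises if that package is absent. Below is a minimal, self-contained sketch of the distinction; it is not part of the patch series, and it assumes only the standard library and that `v` in the diff aliases an importlib-metadata `version` function.

# Quick check (illustration only): the two Google distributions carry
# independent version metadata, which is why the Gemini instrumentation
# must query the package it actually wraps.
from importlib.metadata import PackageNotFoundError, version

for dist in ("google-generativeai", "google-cloud-aiplatform"):
    try:
        # version() reads the installed distribution's metadata by name
        print(dist, version(dist))
    except PackageNotFoundError:
        print(dist, "not installed")

In an environment with only google-generativeai installed, the pre-patch lookup would raise PackageNotFoundError rather than report the Gemini SDK version, and with both installed it would silently record the wrong one.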