-
Notifications
You must be signed in to change notification settings - Fork 752
feat(transformers): implement the support to emitting events in addition to current behavior #2940
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open
LuizDMM
wants to merge
3
commits into
traceloop:main
Choose a base branch
from
LuizDMM:implement-events-transformers
base: main
Could not load branches
Branch not found: {{ refName }}
Loading
Could not load tags
Nothing to show
Loading
Are you sure you want to change the base?
Some commits from the old base branch may be removed from the timeline,
and old review comments may become outdated.
Open
Changes from 1 commit
Commits
Show all changes
3 commits
Select commit
Hold shift + click to select a range
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
1 change: 1 addition & 0 deletions
1
...lemetry-instrumentation-transformers/opentelemetry/instrumentation/transformers/config.py
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,3 @@ | ||
class Config:
    # Instrumentation-wide settings for the transformers instrumentor.

    # Optional callback invoked when the instrumentation swallows an exception.
    exception_logger = None
    # When True, prompts/completions are recorded as span attributes
    # (legacy behavior) instead of as events.
    use_legacy_attributes = True
140 changes: 140 additions & 0 deletions
140
...-instrumentation-transformers/opentelemetry/instrumentation/transformers/event_emitter.py
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,140 @@ | ||
from enum import Enum | ||
from typing import Union | ||
|
||
from opentelemetry._events import Event | ||
from opentelemetry.instrumentation.transformers.event_models import ( | ||
CompletionEvent, | ||
PromptEvent, | ||
) | ||
from opentelemetry.instrumentation.transformers.utils import ( | ||
dont_throw, | ||
should_emit_events, | ||
should_send_prompts, | ||
) | ||
from opentelemetry.semconv._incubating.attributes import ( | ||
gen_ai_attributes as GenAIAttributes, | ||
) | ||
|
||
|
||
class Roles(Enum):
    """Chat message roles recognized by the GenAI semantic conventions."""

    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    TOOL = "tool"
|
||
|
||
# Roles that map directly onto a "gen_ai.<role>.message" event name.
VALID_MESSAGE_ROLES = {role.value for role in Roles}
"""The valid roles for naming the message event."""

# Attributes attached to every emitted event, identifying the GenAI system.
EVENT_ATTRIBUTES = {GenAIAttributes.GEN_AI_SYSTEM: "transformers"}
"""The attributes to be used for the event."""
|
||
|
||
@dont_throw
def emit_prompt_events(args, kwargs, event_logger) -> None:
    """Emit one user prompt event per input prompt of a pipeline call.

    Args:
        args: Positional arguments of the instrumented call; the first one
            is expected to hold the prompt or list of prompts.
        kwargs: Keyword arguments of the instrumented call; the "args" key
            is used as a fallback prompt source.
            # NOTE(review): confirm "args" is the right keyword for
            # transformers pipelines — mirrors span_utils.set_input_attributes.
        event_logger: The OpenTelemetry event logger, or None when event
            emission is disabled.
    """
    if not should_emit_events() or event_logger is None:
        return

    prompts_list = args[0] if args else kwargs.get("args")

    # Nothing to emit when no prompt was passed at all; without this guard
    # the loop below would raise (swallowed by @dont_throw, but noisy).
    # Mirrors the None guard in span_utils._set_span_prompts.
    if prompts_list is None:
        return

    if isinstance(prompts_list, str):
        prompts_list = [prompts_list]

    for prompt in prompts_list:
        emit_event(PromptEvent(content=prompt, role="user"), event_logger)
|
||
|
||
@dont_throw
def emit_response_events(response, event_logger) -> None:
    """Emit one "gen_ai.choice" event per generated completion.

    Args:
        response: Pipeline output — a sequence of dicts carrying a
            "generated_text" key (other keys are ignored).
        event_logger: The OpenTelemetry event logger, or None when event
            emission is disabled.
    """
    # Guard mirrors emit_prompt_events: emit_event checks should_emit_events
    # itself, but a None event_logger would still crash inside the private
    # emitters, so bail out early.
    if not should_emit_events() or event_logger is None:
        return

    if not response:
        return

    for index, completion in enumerate(response):
        emit_event(
            CompletionEvent(
                index=index,
                message={
                    "content": completion.get("generated_text"),
                    "role": "assistant",
                },
                # transformers pipelines do not report a finish reason.
                finish_reason="unknown",
            ),
            event_logger,
        )
|
||
|
||
def emit_event(event: Union[PromptEvent, CompletionEvent], event_logger) -> None:
    """
    Emit an event to the OpenTelemetry SDK.

    Args:
        event: The event to emit.
        event_logger: The OpenTelemetry event logger to emit through.

    Raises:
        TypeError: If *event* is neither a PromptEvent nor a CompletionEvent.
    """
    if not should_emit_events():
        return

    # Dispatch on the concrete event type.
    if isinstance(event, PromptEvent):
        _emit_prompt_event(event, event_logger)
        return
    if isinstance(event, CompletionEvent):
        _emit_completion_event(event, event_logger)
        return
    raise TypeError("Unsupported event type")
|
||
|
||
def _emit_prompt_event(event: PromptEvent, event_logger) -> None:
    """Emit a prompt as a "gen_ai.<role>.message" event.

    Args:
        event: The prompt event to serialize and emit.
        event_logger: The OpenTelemetry event logger to emit through.
    """
    body = {
        "content": event.content,
        "role": event.role,
        "tool_calls": event.tool_calls,
    }

    if event.role in VALID_MESSAGE_ROLES:
        name = "gen_ai.{}.message".format(event.role)
        # According to the semantic conventions, the role is conditionally required if available
        # and not equal to the "role" in the message name. So, remove the role from the body if
        # it is the same as the in the event name.
        body.pop("role", None)
    else:
        name = "gen_ai.user.message"

    # According to the semantic conventions, only the assistant role has tool
    # calls; drop them otherwise (and when there are none).
    if event.tool_calls is None or event.role != Roles.ASSISTANT.value:
        body.pop("tool_calls", None)

    if not should_send_prompts():
        del body["content"]
        if body.get("tool_calls") is not None:
            # Rebuild the tool calls without "arguments" rather than popping
            # in place, so the caller's event data is never mutated.
            body["tool_calls"] = [
                {
                    **tool_call,
                    "function": {
                        key: value
                        for key, value in tool_call["function"].items()
                        if key != "arguments"
                    },
                }
                for tool_call in body["tool_calls"]
            ]

    event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
|
||
|
||
def _emit_completion_event(event: CompletionEvent, event_logger) -> None:
    """Emit a completion as a "gen_ai.choice" event.

    Args:
        event: The completion event to serialize and emit.
        event_logger: The OpenTelemetry event logger to emit through.
    """
    # Copy the message so the caller's event is never mutated in place.
    message = dict(event.message)
    body = {
        "index": event.index,
        "message": message,
        "finish_reason": event.finish_reason,
        "tool_calls": event.tool_calls,
    }

    if message.get("role") == Roles.ASSISTANT.value:
        # According to the semantic conventions, the role is conditionally required if available
        # and not equal to "assistant", so remove the role from the body if it is "assistant".
        message.pop("role", None)

    if event.tool_calls is None:
        del body["tool_calls"]

    if not should_send_prompts():
        message.pop("content", None)
        if body.get("tool_calls") is not None:
            # Rebuild the tool calls without "arguments" rather than popping
            # in place, so the caller's event data is never mutated.
            body["tool_calls"] = [
                {
                    **tool_call,
                    "function": {
                        key: value
                        for key, value in tool_call["function"].items()
                        if key != "arguments"
                    },
                }
                for tool_call in body["tool_calls"]
            ]

    event_logger.emit(
        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
    )
48 changes: 48 additions & 0 deletions
48
...y-instrumentation-transformers/opentelemetry/instrumentation/transformers/event_models.py
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,48 @@ | ||
from dataclasses import dataclass | ||
from typing import Any, List, Literal, Optional, TypedDict | ||
|
||
|
||
class _FunctionToolCall(TypedDict):
    """Function name and arguments of a single tool call."""

    # Name of the function the model asked to invoke.
    function_name: str
    # Keyword arguments for the call; None when not captured.
    arguments: Optional[dict[str, Any]]
|
||
|
||
class ToolCall(TypedDict):
    """Represents a tool call in the AI model."""

    # Provider-assigned identifier for this tool call.
    id: str
    # The function invocation the model requested.
    function: _FunctionToolCall
    # Only "function"-type tool calls are modeled here.
    type: Literal["function"]
|
||
|
||
class CompletionMessage(TypedDict):
    """Represents a message in the AI model.

    Note: TypedDict keys cannot declare defaults (PEP 589) — the previous
    ``role: str = "assistant"`` was ignored at runtime and rejected by type
    checkers. Callers must pass ``role`` explicitly (typically "assistant").
    """

    # The generated text payload.
    content: Any
    # Chat role of the message author.
    role: str
|
||
|
||
@dataclass
class PromptEvent:
    """Represents an input event for the AI model."""

    # The prompt payload (typically a string).
    content: Any
    # Chat role of the message author.
    role: str = "user"
    # Tool calls attached to the message, if any.
    tool_calls: Optional[List[ToolCall]] = None
|
||
|
||
@dataclass
class CompletionEvent:
    """Represents a completion event for the AI model."""

    # Zero-based index of this choice within the response.
    index: int
    # The generated message (forward-ref string keeps the class importable
    # regardless of model definition order).
    message: "CompletionMessage"
    # Why generation stopped; transformers pipelines report "unknown".
    finish_reason: str = "unknown"
    # Tool calls produced by the model, if any.
    tool_calls: Optional[List["ToolCall"]] = None
    # Token counts backing total_tokens. These fields were previously
    # missing, making total_tokens raise AttributeError unconditionally.
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None

    @property
    def total_tokens(self) -> Optional[int]:
        """Returns the total number of tokens used in the event.

        Returns None when either count is unknown.
        """
        if self.input_tokens is None or self.output_tokens is None:
            return None
        return self.input_tokens + self.output_tokens
82 changes: 82 additions & 0 deletions
82
...try-instrumentation-transformers/opentelemetry/instrumentation/transformers/span_utils.py
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
from opentelemetry.instrumentation.transformers.utils import dont_throw | ||
from opentelemetry.semconv_ai import ( | ||
SpanAttributes, | ||
) | ||
|
||
|
||
def _set_span_attribute(span, name, value): | ||
if value is not None: | ||
if value != "": | ||
span.set_attribute(name, value) | ||
return | ||
|
||
|
||
@dont_throw
def set_input_attributes(span, instance, args, kwargs):
    """Record the call's prompt(s) as span attributes (legacy mode)."""
    if not span.is_recording():
        return

    # Prompts come from the first positional argument, falling back to the
    # "args" keyword argument.
    prompts = args[0] if args else kwargs.get("args")
    _set_span_prompts(span, prompts)
|
||
|
||
@dont_throw
def set_model_input_attributes(span, instance):
    """Record model identity and generation parameters on the span."""
    if not span.is_recording():
        return

    forward_params = instance._forward_params
    config = instance.model.config

    # Attribute/value pairs; _set_span_attribute drops None values.
    request_attributes = (
        (SpanAttributes.LLM_REQUEST_MODEL, config.name_or_path),
        (SpanAttributes.LLM_SYSTEM, config.model_type),
        (SpanAttributes.LLM_REQUEST_TYPE, "completion"),
        (SpanAttributes.LLM_REQUEST_TEMPERATURE, forward_params.get("temperature")),
        (SpanAttributes.LLM_REQUEST_TOP_P, forward_params.get("top_p")),
        (SpanAttributes.LLM_REQUEST_MAX_TOKENS, forward_params.get("max_length")),
        (
            SpanAttributes.LLM_REQUEST_REPETITION_PENALTY,
            forward_params.get("repetition_penalty"),
        ),
    )
    for attribute_name, attribute_value in request_attributes:
        _set_span_attribute(span, attribute_name, attribute_value)
|
||
|
||
@dont_throw
def set_response_attributes(span, response):
    """Record the pipeline completions on the span (legacy mode)."""
    if response and span.is_recording() and len(response) > 0:
        _set_span_completions(span, response)
|
||
|
||
def _set_span_completions(span, completions):
    """Write each completion's generated text as an indexed span attribute."""
    if completions is None:
        return

    for index, completion in enumerate(completions):
        _set_span_attribute(
            span,
            f"{SpanAttributes.LLM_COMPLETIONS}.{index}.content",
            completion.get("generated_text"),
        )
|
||
|
||
def _set_span_prompts(span, messages):
    """Write each prompt as an indexed span attribute.

    A bare string is treated as a single prompt.
    """
    if messages is None:
        return

    prompts = [messages] if isinstance(messages, str) else messages
    for index, prompt in enumerate(prompts):
        _set_span_attribute(
            span, f"{SpanAttributes.LLM_PROMPTS}.{index}.content", prompt
        )
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.