Commit f6d13f8

Test cleanup.
umaannamalai committed Oct 12, 2023
1 parent d057663 commit f6d13f8
Showing 2 changed files with 13 additions and 11 deletions.
12 changes: 7 additions & 5 deletions newrelic/hooks/mlmodel_openai.py
@@ -44,7 +44,7 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs):
     span_id = available_metadata.get("span.id", "")
     trace_id = available_metadata.get("trace.id", "")

-    response_headers = getattr(response, "_nr_response_headers")
+    response_headers = getattr(response, "_nr_response_headers", None)
     response_model = response.model
     settings = transaction.settings if transaction.settings is not None else global_settings()

@@ -63,11 +63,11 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs):
"response.usage.completion_tokens": response.usage.completion_tokens,
"response.usage.total_tokens": response.usage.total_tokens,
"response.usage.prompt_tokens": response.usage.prompt_tokens,
"request.temperature": kwargs.get("temperature"),
"request.max_tokens": kwargs.get("max_tokens"),
"request.temperature": kwargs.get("temperature", ""),
"request.max_tokens": kwargs.get("max_tokens", ""),
"response.choices.finish_reason": response.choices[0].finish_reason,
"response.api_type": response.api_type,
"response.headers.llmVersion": response_headers.get("openai-version"),
"response.headers.llmVersion": response_headers.get("openai-version", ""),
"response.headers.ratelimitLimitRequests": check_rate_limit_header(response_headers, "x-ratelimit-limit-requests", True),
"response.headers.ratelimitLimitTokens": check_rate_limit_header(response_headers, "x-ratelimit-limit-tokens", True),
"response.headers.ratelimitResetTokens": check_rate_limit_header(response_headers, "x-ratelimit-reset-tokens", False),
@@ -77,7 +77,7 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs):
"vendor": "openAI",
"ingest_source": "Python",
"number_of_messages": len(kwargs.get("messages", [])) + len(response.choices),
"api_version": response_headers.get("openai-version")
"api_version": response_headers.get("openai-version", "")
}

transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict)
@@ -89,6 +89,8 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs):


 def check_rate_limit_header(response_headers, header_name, is_int):
+    if not response_headers:
+        return None
     if header_name in response_headers:
         header_value = response_headers.get(header_name)
         if is_int:
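Taken together, the hooks changes make the summary-event construction tolerant of a response that was never tagged with headers: getattr now falls back to None instead of raising AttributeError, the kwargs and header lookups supply empty-string defaults, and check_rate_limit_header short-circuits on a missing headers mapping. Below is a minimal, runnable sketch of the same pattern; FakeResponse and the int-conversion branch are illustrative assumptions, not the agent's actual objects, since the diff truncates before the rest of the helper.

class FakeResponse:
    # Stands in for an OpenAI response object that the instrumentation
    # never tagged with _nr_response_headers (hypothetical stand-in).
    model = "gpt-3.5-turbo"


def check_rate_limit_header(response_headers, header_name, is_int):
    # Same early guard as the patched helper: a missing mapping yields
    # None rather than an AttributeError on .get().
    if not response_headers:
        return None
    if header_name in response_headers:
        header_value = response_headers.get(header_name)
        if is_int:
            # Assumed conversion branch; the diff cuts off before it.
            try:
                header_value = int(header_value)
            except ValueError:
                pass
        return header_value
    return None


response_headers = getattr(FakeResponse(), "_nr_response_headers", None)
print(response_headers)  # None, where the old bare getattr raised

print(check_rate_limit_header(response_headers, "x-ratelimit-limit-requests", True))  # None
print(check_rate_limit_header({"x-ratelimit-limit-requests": "200"},
                              "x-ratelimit-limit-requests", True))  # 200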
12 changes: 6 additions & 6 deletions tests/mlmodel_openai/test_chat_completion.py
@@ -38,11 +38,6 @@
 from newrelic.core.config import global_settings
 from newrelic.packages import six

-_test_openai_chat_completion_sync_messages = (
-    {"role": "system", "content": "You are a scientist."},
-    {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
-)
-

 def set_trace_info():
     txn = current_transaction()
@@ -53,6 +48,12 @@ def set_trace_info():
     trace.guid = "span-id"


+_test_openai_chat_completion_sync_messages = (
+    {"role": "system", "content": "You are a scientist."},
+    {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"},
+)
+
+
 sync_chat_completion_recorded_events = [
     (
         {'type': 'LlmChatCompletionSummary'},
@@ -170,7 +171,6 @@ def test_openai_chat_completion_sync_outside_txn():

 disabled_ml_settings = {
     "machine_learning.enabled": False,
-    "machine_learning.inference_events_value.enabled": False,
     "ml_insights_events.enabled": False
 }

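For context on the trimmed disabled_ml_settings dict: in the agent's test suite such a dict is typically applied with the override_application_settings fixture from the testing_support package, which sits outside this diff. The sketch below is therefore an assumption about how the dict is consumed, with a hypothetical test name; set_trace_info and _test_openai_chat_completion_sync_messages are the module-level definitions shown in the diff above.

import openai

from newrelic.api.background_task import background_task

# Assumed import: override_application_settings is part of the agent's
# test support tooling, not shown in this commit.
from testing_support.fixtures import override_application_settings

disabled_ml_settings = {
    "machine_learning.enabled": False,
    "ml_insights_events.enabled": False,
}


@override_application_settings(disabled_ml_settings)
@background_task()
def test_openai_chat_completion_disabled_settings():
    # With both settings off, the hooks should record no
    # LlmChatCompletion* ML events for this call.
    set_trace_info()
    openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=_test_openai_chat_completion_sync_messages,
        temperature=0.7,
        max_tokens=100,
    )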
