diff --git a/newrelic/agent.py b/newrelic/agent.py index 9665cb9d2..4c0718626 100644 --- a/newrelic/agent.py +++ b/newrelic/agent.py @@ -153,6 +153,10 @@ def __asgi_application(*args, **kwargs): from newrelic.api.message_transaction import ( wrap_message_transaction as __wrap_message_transaction, ) +from newrelic.api.ml_model import ( + record_llm_feedback_event as __record_llm_feedback_event, +) +from newrelic.api.ml_model import set_llm_token_count_callback as __set_llm_token_count_callback from newrelic.api.ml_model import wrap_mlmodel as __wrap_mlmodel from newrelic.api.profile_trace import ProfileTraceWrapper as __ProfileTraceWrapper from newrelic.api.profile_trace import profile_trace as __profile_trace @@ -174,10 +178,10 @@ def __asgi_application(*args, **kwargs): from newrelic.api.web_transaction import web_transaction as __web_transaction from newrelic.api.web_transaction import wrap_web_transaction as __wrap_web_transaction from newrelic.common.object_names import callable_name as __callable_name +from newrelic.common.object_wrapper import CallableObjectProxy as __CallableObjectProxy from newrelic.common.object_wrapper import FunctionWrapper as __FunctionWrapper from newrelic.common.object_wrapper import InFunctionWrapper as __InFunctionWrapper from newrelic.common.object_wrapper import ObjectProxy as __ObjectProxy -from newrelic.common.object_wrapper import CallableObjectProxy as __CallableObjectProxy from newrelic.common.object_wrapper import ObjectWrapper as __ObjectWrapper from newrelic.common.object_wrapper import OutFunctionWrapper as __OutFunctionWrapper from newrelic.common.object_wrapper import PostFunctionWrapper as __PostFunctionWrapper @@ -343,3 +347,5 @@ def __asgi_application(*args, **kwargs): insert_html_snippet = __wrap_api_call(__insert_html_snippet, "insert_html_snippet") verify_body_exists = __wrap_api_call(__verify_body_exists, "verify_body_exists") wrap_mlmodel = __wrap_api_call(__wrap_mlmodel, "wrap_mlmodel") +record_llm_feedback_event = __wrap_api_call(__record_llm_feedback_event, "record_llm_feedback_event") +set_llm_token_count_callback = __wrap_api_call(__set_llm_token_count_callback, "set_llm_token_count_callback") diff --git a/newrelic/api/ml_model.py b/newrelic/api/ml_model.py index edbcaf340..f9895ba7b 100644 --- a/newrelic/api/ml_model.py +++ b/newrelic/api/ml_model.py @@ -12,11 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import sys +import uuid +import warnings +from newrelic.api.transaction import current_transaction from newrelic.common.object_names import callable_name +from newrelic.core.config import global_settings from newrelic.hooks.mlmodel_sklearn import _nr_instrument_model +_logger = logging.getLogger(__name__) + def wrap_mlmodel(model, name=None, version=None, feature_names=None, label_names=None, metadata=None): model_callable_name = callable_name(model) @@ -33,3 +40,87 @@ def wrap_mlmodel(model, name=None, version=None, feature_names=None, label_names model._nr_wrapped_label_names = label_names if metadata: model._nr_wrapped_metadata = metadata + + +def record_llm_feedback_event(trace_id, rating, category=None, message=None, metadata=None): + transaction = current_transaction() + if not transaction: + warnings.warn( + "No message feedback events will be recorded. record_llm_feedback_event must be called within the " + "scope of a transaction." 
+        )
+        return
+
+    feedback_event_id = str(uuid.uuid4())
+    feedback_event = metadata.copy() if metadata else {}
+    feedback_event.update(
+        {
+            "id": feedback_event_id,
+            "trace_id": trace_id,
+            "rating": rating,
+            "category": category,
+            "message": message,
+            "ingest_source": "Python",
+        }
+    )
+
+    transaction.record_custom_event("LlmFeedbackMessage", feedback_event)
+
+
+def set_llm_token_count_callback(callback, application=None):
+    """
+    Set the current callback to be used to calculate LLM token counts.
+
+    Arguments:
+    callback -- the user-defined callback that calculates and returns the total token count as an integer, or None if the count cannot be determined
+    application -- optional application object to associate the call with
+    """
+    if callback and not callable(callback):
+        _logger.error(
+            "callback passed to set_llm_token_count_callback must be a Callable type or None to unset the callback."
+        )
+        return
+
+    from newrelic.api.application import application_instance
+
+    # Fall back to the activated application if one exists and none was given.
+    application = application or application_instance(activate=False)
+
+    # Get the application settings if they exist, or fall back to the global settings object.
+    settings = application.settings if application else global_settings()
+
+    if not settings:
+        _logger.error(
+            "Failed to set llm_token_count_callback. Settings not found on application or in global_settings."
+        )
+        return
+
+    if not callback:
+        settings.ai_monitoring._llm_token_count_callback = None
+        return
+
+    def _wrap_callback(model, content):
+        if model is None:
+            _logger.debug(
+                "The model argument passed to the user-defined token calculation callback is None. The callback will not be run."
+            )
+            return None
+
+        if content is None:
+            _logger.debug(
+                "The content argument passed to the user-defined token calculation callback is None. The callback will not be run."
+            )
+            return None
+
+        token_count_val = callback(model, content)
+
+        if not isinstance(token_count_val, int) or token_count_val < 0:
+            _logger.warning(
+                "llm_token_count_callback returned an invalid value of %s. This value must be a non-negative integer and will not be recorded for the token_count."
+                % token_count_val
+            )
+            return None
+
+        return token_count_val
+
+    settings.ai_monitoring._llm_token_count_callback = _wrap_callback
diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py
index 24be0e00f..40ef22512 100644
--- a/newrelic/api/time_trace.py
+++ b/newrelic/api/time_trace.py
@@ -29,7 +29,6 @@
 )
 from newrelic.core.config import is_expected_error, should_ignore_error
 from newrelic.core.trace_cache import trace_cache
-
 from newrelic.packages import six
 
 _logger = logging.getLogger(__name__)
@@ -260,6 +259,11 @@ def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_c
         module, name, fullnames, message_raw = parse_exc_info((exc, value, tb))
         fullname = fullnames[0]
 
+        # In case the message is in JSON format for OpenAI models,
+        # this will result in a "cleaner" message format
+        if getattr(value, "_nr_message", None):
+            message_raw = value._nr_message
+
         # Check to see if we need to strip the message before recording it.
if settings.strip_exception_messages.enabled and fullname not in settings.strip_exception_messages.allowlist: @@ -422,23 +426,32 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, input_attributes = {} input_attributes.update(transaction._custom_params) input_attributes.update(attributes) - error_group_name_raw = settings.error_collector.error_group_callback(value, { - "traceback": tb, - "error.class": exc, - "error.message": message_raw, - "error.expected": is_expected, - "custom_params": input_attributes, - "transactionName": getattr(transaction, "name", None), - "response.status": getattr(transaction, "_response_code", None), - "request.method": getattr(transaction, "_request_method", None), - "request.uri": getattr(transaction, "_request_uri", None), - }) + error_group_name_raw = settings.error_collector.error_group_callback( + value, + { + "traceback": tb, + "error.class": exc, + "error.message": message_raw, + "error.expected": is_expected, + "custom_params": input_attributes, + "transactionName": getattr(transaction, "name", None), + "response.status": getattr(transaction, "_response_code", None), + "request.method": getattr(transaction, "_request_method", None), + "request.uri": getattr(transaction, "_request_uri", None), + }, + ) if error_group_name_raw: _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, six.string_types): - raise ValueError("Invalid attribute value for error.group.name. Expected string, got: %s" % repr(error_group_name_raw)) + raise ValueError( + "Invalid attribute value for error.group.name. Expected string, got: %s" + % repr(error_group_name_raw) + ) except Exception: - _logger.error("Encountered error when calling error group callback:\n%s", "".join(traceback.format_exception(*sys.exc_info()))) + _logger.error( + "Encountered error when calling error group callback:\n%s", + "".join(traceback.format_exception(*sys.exc_info())), + ) error_group_name = None transaction._create_error_node( @@ -595,13 +608,11 @@ def update_async_exclusive_time(self, min_child_start_time, exclusive_duration): def process_child(self, node, is_async): self.children.append(node) if is_async: - # record the lowest start time self.min_child_start_time = min(self.min_child_start_time, node.start_time) # if there are no children running, finalize exclusive time if self.child_count == len(self.children): - exclusive_duration = node.end_time - self.min_child_start_time self.update_async_exclusive_time(self.min_child_start_time, exclusive_duration) diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index 0b574feb0..5b44d1f81 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -176,7 +176,7 @@ def __init__(self, application, enabled=None, source=None): self.thread_id = None - self._transaction_id = id(self) + self._identity = id(self) self._transaction_lock = threading.Lock() self._dead = False @@ -193,6 +193,7 @@ def __init__(self, application, enabled=None, source=None): self._frameworks = set() self._message_brokers = set() self._dispatchers = set() + self._ml_models = set() self._frozen_path = None @@ -274,6 +275,7 @@ def __init__(self, application, enabled=None, source=None): trace_id = "%032x" % random.getrandbits(128) # 16-digit random hex. Padded with zeros in the front. + # This is the official transactionId in the UI. self.guid = trace_id[:16] # 32-digit random hex. Padded with zeros in the front. 
@@ -421,7 +423,7 @@ def __exit__(self, exc, value, tb): if not self.enabled: return - if self._transaction_id != id(self): + if self._identity != id(self): return if not self._settings: @@ -568,6 +570,10 @@ def __exit__(self, exc, value, tb): for dispatcher, version in self._dispatchers: self.record_custom_metric("Python/Dispatcher/%s/%s" % (dispatcher, version), 1) + if self._ml_models: + for ml_model, version in self._ml_models: + self.record_custom_metric("Supportability/Python/ML/%s/%s" % (ml_model, version), 1) + if self._settings.distributed_tracing.enabled: # Sampled and priority need to be computed at the end of the # transaction when distributed tracing or span events are enabled. @@ -1715,7 +1721,7 @@ def record_custom_event(self, event_type, params): if not settings.custom_insights_events.enabled: return - event = create_custom_event(event_type, params) + event = create_custom_event(event_type, params, settings=settings) if event: self._custom_events.add(event, priority=self.priority) @@ -1728,7 +1734,7 @@ def record_ml_event(self, event_type, params): if not settings.ml_insights_events.enabled: return - event = create_custom_event(event_type, params) + event = create_custom_event(event_type, params, settings=settings, is_ml_event=True) if event: self._ml_events.add(event, priority=self.priority) @@ -1835,6 +1841,10 @@ def add_dispatcher_info(self, name, version=None): if name: self._dispatchers.add((name, version)) + def add_ml_model_info(self, name, version=None): + if name: + self._ml_models.add((name, version)) + def dump(self, file): """Dumps details about the transaction to the file object.""" diff --git a/newrelic/common/utilization.py b/newrelic/common/utilization.py index b7ddbdf43..d1c918499 100644 --- a/newrelic/common/utilization.py +++ b/newrelic/common/utilization.py @@ -17,14 +17,14 @@ import re import socket import string -import threading from newrelic.common.agent_http import InsecureHttpClient from newrelic.common.encoding_utils import json_decode from newrelic.core.internal_metrics import internal_count_metric _logger = logging.getLogger(__name__) -VALID_CHARS_RE = re.compile(r'[0-9a-zA-Z_ ./-]') +VALID_CHARS_RE = re.compile(r"[0-9a-zA-Z_ ./-]") + class UtilizationHttpClient(InsecureHttpClient): SOCKET_TIMEOUT = 0.05 @@ -46,38 +46,35 @@ def send_request(self, *args, **kwargs): class CommonUtilization(object): - METADATA_HOST = '' - METADATA_PATH = '' + METADATA_HOST = "" + METADATA_PATH = "" METADATA_QUERY = None HEADERS = None EXPECTED_KEYS = () - VENDOR_NAME = '' + VENDOR_NAME = "" FETCH_TIMEOUT = 0.4 CLIENT_CLS = UtilizationHttpClient @classmethod def record_error(cls, resource, data): # As per spec - internal_count_metric( - 'Supportability/utilization/%s/error' % cls.VENDOR_NAME, 1) - _logger.warning('Invalid %r data (%r): %r', - cls.VENDOR_NAME, resource, data) + internal_count_metric("Supportability/utilization/%s/error" % cls.VENDOR_NAME, 1) + _logger.warning("Invalid %r data (%r): %r", cls.VENDOR_NAME, resource, data) @classmethod def fetch(cls): try: - with cls.CLIENT_CLS(cls.METADATA_HOST, - timeout=cls.FETCH_TIMEOUT) as client: - resp = client.send_request(method='GET', - path=cls.METADATA_PATH, - params=cls.METADATA_QUERY, - headers=cls.HEADERS) + with cls.CLIENT_CLS(cls.METADATA_HOST, timeout=cls.FETCH_TIMEOUT) as client: + resp = client.send_request( + method="GET", path=cls.METADATA_PATH, params=cls.METADATA_QUERY, headers=cls.HEADERS + ) if not 200 <= resp[0] < 300: raise ValueError(resp[0]) return resp[1] except Exception as e: - 
_logger.debug('Unable to fetch %s data from %s%s: %r', - cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, e) + _logger.debug( + "Unable to fetch %s data from %s%s: %r", cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, e + ) return None @classmethod @@ -86,11 +83,9 @@ def get_values(cls, response): return try: - return json_decode(response.decode('utf-8')) + return json_decode(response.decode("utf-8")) except ValueError: - _logger.debug('Invalid %s data (%s%s): %r', - cls.VENDOR_NAME, cls.METADATA_HOST, - cls.METADATA_PATH, response) + _logger.debug("Invalid %s data (%s%s): %r", cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, response) @classmethod def valid_chars(cls, data): @@ -108,7 +103,7 @@ def valid_length(cls, data): if data is None: return False - b = data.encode('utf-8') + b = data.encode("utf-8") valid = len(b) <= 255 if valid: return True @@ -123,8 +118,7 @@ def normalize(cls, key, data): try: stripped = data.strip() - if (stripped and cls.valid_length(stripped) and - cls.valid_chars(stripped)): + if stripped and cls.valid_length(stripped) and cls.valid_chars(stripped): return stripped except: pass @@ -158,28 +152,27 @@ def detect(cls): class AWSUtilization(CommonUtilization): - EXPECTED_KEYS = ('availabilityZone', 'instanceId', 'instanceType') - METADATA_HOST = '169.254.169.254' - METADATA_PATH = '/latest/dynamic/instance-identity/document' - METADATA_TOKEN_PATH = '/latest/api/token' - HEADERS = {'X-aws-ec2-metadata-token-ttl-seconds': '21600'} - VENDOR_NAME = 'aws' + EXPECTED_KEYS = ("availabilityZone", "instanceId", "instanceType") + METADATA_HOST = "169.254.169.254" + METADATA_PATH = "/latest/dynamic/instance-identity/document" + METADATA_TOKEN_PATH = "/latest/api/token" + HEADERS = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"} + VENDOR_NAME = "aws" @classmethod def fetchAuthToken(cls): try: - with cls.CLIENT_CLS(cls.METADATA_HOST, - timeout=cls.FETCH_TIMEOUT) as client: - resp = client.send_request(method='PUT', - path=cls.METADATA_TOKEN_PATH, - params=cls.METADATA_QUERY, - headers=cls.HEADERS) + with cls.CLIENT_CLS(cls.METADATA_HOST, timeout=cls.FETCH_TIMEOUT) as client: + resp = client.send_request( + method="PUT", path=cls.METADATA_TOKEN_PATH, params=cls.METADATA_QUERY, headers=cls.HEADERS + ) if not 200 <= resp[0] < 300: raise ValueError(resp[0]) return resp[1] except Exception as e: - _logger.debug('Unable to fetch %s data from %s%s: %r', - cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, e) + _logger.debug( + "Unable to fetch %s data from %s%s: %r", cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, e + ) return None @classmethod @@ -189,46 +182,45 @@ def fetch(cls): if authToken == None: return cls.HEADERS = {"X-aws-ec2-metadata-token": authToken} - with cls.CLIENT_CLS(cls.METADATA_HOST, - timeout=cls.FETCH_TIMEOUT) as client: - resp = client.send_request(method='GET', - path=cls.METADATA_PATH, - params=cls.METADATA_QUERY, - headers=cls.HEADERS) + with cls.CLIENT_CLS(cls.METADATA_HOST, timeout=cls.FETCH_TIMEOUT) as client: + resp = client.send_request( + method="GET", path=cls.METADATA_PATH, params=cls.METADATA_QUERY, headers=cls.HEADERS + ) if not 200 <= resp[0] < 300: raise ValueError(resp[0]) return resp[1] except Exception as e: - _logger.debug('Unable to fetch %s data from %s%s: %r', - cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, e) + _logger.debug( + "Unable to fetch %s data from %s%s: %r", cls.VENDOR_NAME, cls.METADATA_HOST, cls.METADATA_PATH, e + ) return None class AzureUtilization(CommonUtilization): - METADATA_HOST 
= '169.254.169.254' - METADATA_PATH = '/metadata/instance/compute' - METADATA_QUERY = {'api-version': '2017-03-01'} - EXPECTED_KEYS = ('location', 'name', 'vmId', 'vmSize') - HEADERS = {'Metadata': 'true'} - VENDOR_NAME = 'azure' + METADATA_HOST = "169.254.169.254" + METADATA_PATH = "/metadata/instance/compute" + METADATA_QUERY = {"api-version": "2017-03-01"} + EXPECTED_KEYS = ("location", "name", "vmId", "vmSize") + HEADERS = {"Metadata": "true"} + VENDOR_NAME = "azure" class GCPUtilization(CommonUtilization): - EXPECTED_KEYS = ('id', 'machineType', 'name', 'zone') - HEADERS = {'Metadata-Flavor': 'Google'} - METADATA_HOST = 'metadata.google.internal' - METADATA_PATH = '/computeMetadata/v1/instance/' - METADATA_QUERY = {'recursive': 'true'} - VENDOR_NAME = 'gcp' + EXPECTED_KEYS = ("id", "machineType", "name", "zone") + HEADERS = {"Metadata-Flavor": "Google"} + METADATA_HOST = "metadata.google.internal" + METADATA_PATH = "/computeMetadata/v1/instance/" + METADATA_QUERY = {"recursive": "true"} + VENDOR_NAME = "gcp" @classmethod def normalize(cls, key, data): if data is None: return - if key in ('machineType', 'zone'): - formatted = data.strip().split('/')[-1] - elif key == 'id': + if key in ("machineType", "zone"): + formatted = data.strip().split("/")[-1] + elif key == "id": formatted = str(data) else: formatted = data @@ -237,14 +229,14 @@ def normalize(cls, key, data): class PCFUtilization(CommonUtilization): - EXPECTED_KEYS = ('cf_instance_guid', 'cf_instance_ip', 'memory_limit') - VENDOR_NAME = 'pcf' + EXPECTED_KEYS = ("cf_instance_guid", "cf_instance_ip", "memory_limit") + VENDOR_NAME = "pcf" @staticmethod def fetch(): - cf_instance_guid = os.environ.get('CF_INSTANCE_GUID') - cf_instance_ip = os.environ.get('CF_INSTANCE_IP') - memory_limit = os.environ.get('MEMORY_LIMIT') + cf_instance_guid = os.environ.get("CF_INSTANCE_GUID") + cf_instance_ip = os.environ.get("CF_INSTANCE_IP") + memory_limit = os.environ.get("MEMORY_LIMIT") pcf_vars = (cf_instance_guid, cf_instance_ip, memory_limit) if all(pcf_vars): return pcf_vars @@ -256,35 +248,35 @@ def get_values(cls, response): values = {} for k, v in zip(cls.EXPECTED_KEYS, response): - if hasattr(v, 'decode'): - v = v.decode('utf-8') + if hasattr(v, "decode"): + v = v.decode("utf-8") values[k] = v return values class DockerUtilization(CommonUtilization): - VENDOR_NAME = 'docker' - EXPECTED_KEYS = ('id',) + VENDOR_NAME = "docker" + EXPECTED_KEYS = ("id",) + + METADATA_FILE_CGROUPS_V1 = "/proc/self/cgroup" + METADATA_RE_CGROUPS_V1 = re.compile(r"[0-9a-f]{64,}") - METADATA_FILE_CGROUPS_V1 = '/proc/self/cgroup' - METADATA_RE_CGROUPS_V1 = re.compile(r'[0-9a-f]{64,}') - - METADATA_FILE_CGROUPS_V2 = '/proc/self/mountinfo' - METADATA_RE_CGROUPS_V2 = re.compile(r'^.*/docker/containers/([0-9a-f]{64,})/.*$') + METADATA_FILE_CGROUPS_V2 = "/proc/self/mountinfo" + METADATA_RE_CGROUPS_V2 = re.compile(r"^.*/docker/containers/([0-9a-f]{64,})/.*$") @classmethod def fetch(cls): # Try to read from cgroups try: - with open(cls.METADATA_FILE_CGROUPS_V1, 'rb') as f: + with open(cls.METADATA_FILE_CGROUPS_V1, "rb") as f: for line in f: - stripped = line.decode('utf-8').strip() - cgroup = stripped.split(':') + stripped = line.decode("utf-8").strip() + cgroup = stripped.split(":") if len(cgroup) != 3: continue - subsystems = cgroup[1].split(',') - if 'cpu' in subsystems: - contents = cgroup[2].split('/')[-1] + subsystems = cgroup[1].split(",") + if "cpu" in subsystems: + contents = cgroup[2].split("/")[-1] match = cls.METADATA_RE_CGROUPS_V1.search(contents) if match: 
return match.group(0) @@ -295,9 +287,9 @@ def fetch(cls): # Fallback to reading from mountinfo try: - with open(cls.METADATA_FILE_CGROUPS_V2, 'rb') as f: + with open(cls.METADATA_FILE_CGROUPS_V2, "rb") as f: for line in f: - stripped = line.decode('utf-8').strip() + stripped = line.decode("utf-8").strip() match = cls.METADATA_RE_CGROUPS_V2.match(stripped) if match: return match.group(1) @@ -311,7 +303,7 @@ def get_values(cls, contents): if contents is None: return - return {'id': contents} + return {"id": contents} @classmethod def valid_chars(cls, data): @@ -336,12 +328,12 @@ def valid_length(cls, data): class KubernetesUtilization(CommonUtilization): - EXPECTED_KEYS = ('kubernetes_service_host', ) - VENDOR_NAME = 'kubernetes' + EXPECTED_KEYS = ("kubernetes_service_host",) + VENDOR_NAME = "kubernetes" @staticmethod def fetch(): - kubernetes_service_host = os.environ.get('KUBERNETES_SERVICE_HOST') + kubernetes_service_host = os.environ.get("KUBERNETES_SERVICE_HOST") if kubernetes_service_host: return kubernetes_service_host @@ -350,7 +342,7 @@ def get_values(cls, v): if v is None: return - if hasattr(v, 'decode'): - v = v.decode('utf-8') + if hasattr(v, "decode"): + v = v.decode("utf-8") - return {'kubernetes_service_host': v} + return {"kubernetes_service_host": v} diff --git a/newrelic/config.py b/newrelic/config.py index 2528d84d3..370677a8f 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -34,7 +34,6 @@ import newrelic.api.generator_trace import newrelic.api.import_hook import newrelic.api.memcache_trace -from newrelic.common.object_names import callable_name import newrelic.api.profile_trace import newrelic.api.settings import newrelic.api.transaction_name @@ -43,7 +42,7 @@ import newrelic.core.agent import newrelic.core.config from newrelic.common.log_file import initialize_logging -from newrelic.common.object_names import expand_builtin_exception_name +from newrelic.common.object_names import callable_name, expand_builtin_exception_name from newrelic.core import trace_cache from newrelic.core.config import ( Settings, @@ -443,6 +442,7 @@ def _process_configuration(section): ) _process_setting(section, "custom_insights_events.enabled", "getboolean", None) _process_setting(section, "custom_insights_events.max_samples_stored", "getint", None) + _process_setting(section, "custom_insights_events.max_attribute_value", "getint", None) _process_setting(section, "ml_insights_events.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.exclude_newrelic_header", "getboolean", None) @@ -560,6 +560,9 @@ def _process_configuration(section): _process_setting(section, "machine_learning.enabled", "getboolean", None) _process_setting(section, "machine_learning.inference_events_value.enabled", "getboolean", None) + _process_setting(section, "ai_monitoring.enabled", "getboolean", None) + _process_setting(section, "ai_monitoring.record_content.enabled", "getboolean", None) + _process_setting(section, "ai_monitoring.streaming.enabled", "getboolean", None) _process_setting(section, "package_reporting.enabled", "getboolean", None) @@ -909,6 +912,10 @@ def apply_local_high_security_mode_setting(settings): settings.machine_learning.inference_events_value.enabled = False _logger.info(log_template, "machine_learning.inference_events_value.enabled", True, False) + if settings.ai_monitoring.enabled: + settings.ai_monitoring.enabled = False + _logger.info(log_template, "ai_monitoring.enabled", True, 
False) + return settings @@ -2041,11 +2048,545 @@ def _process_trace_cache_import_hooks(): def _process_module_builtin_defaults(): + _process_module_definition( + "openai.api_resources.embedding", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_api_resources_embedding", + ) + _process_module_definition( + "openai.api_resources.chat_completion", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_api_resources_chat_completion", + ) + _process_module_definition( + "openai.resources.embeddings", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_resources_embeddings", + ) + _process_module_definition( + "openai.util", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_util", + ) + _process_module_definition( + "openai.api_resources.abstract.engine_api_resource", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_api_resources_abstract_engine_api_resource", + ) + _process_module_definition( + "openai._streaming", + "newrelic.hooks.mlmodel_openai", + "instrument_openai__streaming", + ) + + _process_module_definition( + "openai.resources.chat.completions", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_resources_chat_completions", + ) + _process_module_definition( + "openai._base_client", + "newrelic.hooks.mlmodel_openai", + "instrument_openai_base_client", + ) + _process_module_definition( "asyncio.base_events", "newrelic.hooks.coroutines_asyncio", "instrument_asyncio_base_events", ) + + _process_module_definition( + "langchain_core.runnables.base", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_runnables_chains_base", + ) + _process_module_definition( + "langchain.chains.base", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_chains_base", + ) + _process_module_definition( + "langchain_core.callbacks.manager", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_callbacks_manager", + ) + _process_module_definition( + "langchain_community.vectorstores.docarray.hnsw", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.docarray.in_memory", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.alibabacloud_opensearch", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.redis.base", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.analyticdb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.annoy", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.apache_doris", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.astradb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.atlas", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + 
"langchain_community.vectorstores.awadb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.azure_cosmos_db", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.azuresearch", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.bageldb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.baiduvectordb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.baiducloud_vector_search", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.bigquery_vector_search", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + _process_module_definition( + "langchain_community.vectorstores.cassandra", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.chroma", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.clarifai", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.clickhouse", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.couchbase", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.dashvector", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.databricks_vector_search", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.deeplake", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.dingo", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.documentdb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.elastic_vector_search", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.elasticsearch", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.epsilla", + "newrelic.hooks.mlmodel_langchain", + 
"instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.faiss", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.hanavector", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.hippo", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.hologres", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.infinispanvs", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.kdbai", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.kinetica", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.lancedb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.lantern", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.llm_rails", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.marqo", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.matching_engine", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.meilisearch", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.milvus", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.momento_vector_index", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.mongodb_atlas", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.myscale", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.neo4j_vector", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.nucliadb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.opensearch_vector_search", + 
"newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.pgembedding", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.pgvecto_rs", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.pgvector", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.pinecone", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.qdrant", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.rocksetdb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.scann", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.semadb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.singlestoredb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.sklearn", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.sqlitevss", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.starrocks", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.supabase", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.surrealdb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.tair", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.tencentvectordb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.thirdai_neuraldb", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.tidb_vector", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.tigris", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.tiledb", 
+ "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.timescalevector", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.typesense", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.usearch", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.vald", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.vearch", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.vectara", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.vespa", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.weaviate", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.xata", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.yellowbrick", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_community.vectorstores.zep", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_vectorstore_similarity_search", + ) + + _process_module_definition( + "langchain_core.tools", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_core_tools", + ) + + _process_module_definition( + "langchain_core.callbacks.manager", + "newrelic.hooks.mlmodel_langchain", + "instrument_langchain_callbacks_manager", + ) + _process_module_definition( "asyncio.events", "newrelic.hooks.coroutines_asyncio", diff --git a/newrelic/core/agent_protocol.py b/newrelic/core/agent_protocol.py index dd4dc264f..071864757 100644 --- a/newrelic/core/agent_protocol.py +++ b/newrelic/core/agent_protocol.py @@ -147,6 +147,7 @@ class AgentProtocol(object): "ml_insights_events.enabled", "application_logging.forwarding.enabled", "machine_learning.inference_events_value.enabled", + "ai_monitoring.enabled", ) LOGGER_FUNC_MAPPING = { diff --git a/newrelic/core/application.py b/newrelic/core/application.py index 42cff443e..6c9902603 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -552,6 +552,7 @@ def connect_to_data_collector(self, activate_agent): application_logging_local_decorating = ( configuration.application_logging.enabled and configuration.application_logging.local_decorating.enabled ) + ai_monitoring_streaming = configuration.ai_monitoring.streaming.enabled internal_metric( "Supportability/Logging/Forwarding/Python/%s" % ("enabled" if application_logging_forwarding else "disabled"), @@ -566,6 +567,11 @@ def connect_to_data_collector(self, activate_agent): "Supportability/Logging/Metrics/Python/%s" % 
("enabled" if application_logging_metrics else "disabled"), 1, ) + if not ai_monitoring_streaming: + internal_metric( + "Supportability/Python/ML/Streaming/Disabled", + 1, + ) # Infinite tracing feature toggle metrics infinite_tracing = configuration.infinite_tracing.enabled # Property that checks trace observer host @@ -916,7 +922,7 @@ def record_custom_event(self, event_type, params): if settings is None or not settings.custom_insights_events.enabled: return - event = create_custom_event(event_type, params) + event = create_custom_event(event_type, params, settings=settings) if event: with self._stats_custom_lock: @@ -932,7 +938,7 @@ def record_ml_event(self, event_type, params): if settings is None or not settings.ml_insights_events.enabled: return - event = create_custom_event(event_type, params) + event = create_custom_event(event_type, params, settings=settings, is_ml_event=True) if event: with self._stats_custom_lock: diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index 42c47bc6d..e0f055f47 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -71,6 +71,7 @@ "host.displayName", "http.statusCode", "http.url", + "llm", "message.queueName", "message.routingKey", "peer.address", @@ -90,6 +91,8 @@ MAX_NUM_USER_ATTRIBUTES = 128 MAX_ATTRIBUTE_LENGTH = 255 +MAX_NUM_ML_USER_ATTRIBUTES = 64 +MAX_ML_ATTRIBUTE_LENGTH = 4095 MAX_64_BIT_INT = 2**63 - 1 MAX_LOG_MESSAGE_LENGTH = 32768 diff --git a/newrelic/core/config.py b/newrelic/core/config.py index f627356c7..b17deee24 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -31,6 +31,7 @@ import newrelic.packages.six as six from newrelic.common.object_names import parse_exc_info +from newrelic.core.attribute import MAX_ATTRIBUTE_LENGTH from newrelic.core.attribute_filter import AttributeFilter try: @@ -143,6 +144,20 @@ class MachineLearningInferenceEventsValueSettings(Settings): pass +class AIMonitoringSettings(Settings): + @property + def llm_token_count_callback(self): + return self._llm_token_count_callback + + +class AIMonitoringStreamingSettings(Settings): + pass + + +class AIMonitoringRecordContentSettings(Settings): + pass + + class PackageReportingSettings(Settings): pass @@ -408,6 +423,9 @@ class EventHarvestConfigHarvestLimitSettings(Settings): _settings.application_logging.metrics = ApplicationLoggingMetricsSettings() _settings.machine_learning = MachineLearningSettings() _settings.machine_learning.inference_events_value = MachineLearningInferenceEventsValueSettings() +_settings.ai_monitoring = AIMonitoringSettings() +_settings.ai_monitoring.streaming = AIMonitoringStreamingSettings() +_settings.ai_monitoring.record_content = AIMonitoringRecordContentSettings() _settings.package_reporting = PackageReportingSettings() _settings.attributes = AttributesSettings() _settings.browser_monitoring = BrowserMonitorSettings() @@ -728,6 +746,10 @@ def default_otlp_host(host): _settings.transaction_events.attributes.include = [] _settings.custom_insights_events.enabled = True +_settings.custom_insights_events.max_attribute_value = _environ_as_int( + "NEW_RELIC_CUSTOM_INSIGHTS_EVENTS_MAX_ATTRIBUTE_VALUE", default=MAX_ATTRIBUTE_LENGTH +) + _settings.ml_insights_events.enabled = False _settings.distributed_tracing.enabled = _environ_as_bool("NEW_RELIC_DISTRIBUTED_TRACING_ENABLED", default=True) @@ -918,7 +940,14 @@ def default_otlp_host(host): _settings.machine_learning.inference_events_value.enabled = _environ_as_bool( "NEW_RELIC_MACHINE_LEARNING_INFERENCE_EVENT_VALUE_ENABLED", default=False 
 )
+_settings.ai_monitoring.enabled = _environ_as_bool("NEW_RELIC_AI_MONITORING_ENABLED", default=False)
+_settings.ai_monitoring.streaming.enabled = _environ_as_bool("NEW_RELIC_AI_MONITORING_STREAMING_ENABLED", default=True)
+_settings.ai_monitoring.record_content.enabled = _environ_as_bool(
+    "NEW_RELIC_AI_MONITORING_RECORD_CONTENT_ENABLED", default=True
+)
+_settings.ai_monitoring._llm_token_count_callback = None
 _settings.package_reporting.enabled = _environ_as_bool("NEW_RELIC_PACKAGE_REPORTING_ENABLED", default=True)
+_settings.ml_insights_events.enabled = _environ_as_bool("NEW_RELIC_ML_INSIGHTS_EVENTS_ENABLED", default=False)
 
 
 def global_settings():
@@ -1191,6 +1220,14 @@ def apply_server_side_settings(server_side_config=None, settings=_settings):
             settings_snapshot.event_harvest_config.harvest_limits.ml_event_data / 12,
         )
 
+    # Since the server does not override this setting, we must override it here manually
+    # by capping it at the max value of 4095.
+    apply_config_setting(
+        settings_snapshot,
+        "custom_insights_events.max_attribute_value",
+        min(settings_snapshot.custom_insights_events.max_attribute_value, 4095),
+    )
+
     # This will be removed at some future point
     # Special case for account_id which will be sent instead of
     # cross_process_id in the future
diff --git a/newrelic/core/custom_event.py b/newrelic/core/custom_event.py
index 206fb84e6..3741165e0 100644
--- a/newrelic/core/custom_event.py
+++ b/newrelic/core/custom_event.py
@@ -11,27 +11,41 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import logging
 import re
 import time
 
-from newrelic.core.attribute import (check_name_is_string, check_name_length,
-    process_user_attribute, NameIsNotStringException, NameTooLongException,
-    MAX_NUM_USER_ATTRIBUTES)
-
+from newrelic.core.attribute import (
+    MAX_ML_ATTRIBUTE_LENGTH,
+    MAX_NUM_ML_USER_ATTRIBUTES,
+    MAX_NUM_USER_ATTRIBUTES,
+    NameIsNotStringException,
+    NameTooLongException,
+    check_name_is_string,
+    check_name_length,
+    process_user_attribute,
+)
+from newrelic.core.config import global_settings
 
 _logger = logging.getLogger(__name__)
 
-EVENT_TYPE_VALID_CHARS_REGEX = re.compile(r'^[a-zA-Z0-9:_ ]+$')
+EVENT_TYPE_VALID_CHARS_REGEX = re.compile(r"^[a-zA-Z0-9:_ ]+$")
+NO_LIMIT_LLM_EVENT_TYPE = {
+    "LlmChatCompletionMessage": "content",
+    "LlmEmbedding": "input",
+}
+
+
+class NameInvalidCharactersException(Exception):
+    pass
 
-class NameInvalidCharactersException(Exception): pass
 
 def check_event_type_valid_chars(name):
     regex = EVENT_TYPE_VALID_CHARS_REGEX
     if not regex.match(name):
         raise NameInvalidCharactersException()
 
+
 def process_event_type(name):
     """Perform all necessary validation on a potential event type.
 
@@ -55,24 +69,22 @@ def process_event_type(name):
 
         check_event_type_valid_chars(name)
 
     except NameIsNotStringException:
-        _logger.debug('Event type must be a string. Dropping '
-                'event: %r', name)
+        _logger.debug("Event type must be a string. Dropping event: %r", name)
         return FAILED_RESULT
 
     except NameTooLongException:
-        _logger.debug('Event type exceeds maximum length. Dropping '
-                'event: %r', name)
+        _logger.debug("Event type exceeds maximum length. Dropping event: %r", name)
         return FAILED_RESULT
 
     except NameInvalidCharactersException:
-        _logger.debug('Event type has invalid characters. Dropping '
-                'event: %r', name)
+        _logger.debug("Event type has invalid characters. 
Dropping event: %r", name) return FAILED_RESULT else: return name -def create_custom_event(event_type, params): + +def create_custom_event(event_type, params, settings=None, is_ml_event=False): """Creates a valid custom event. Ensures that the custom event has a valid name, and also checks @@ -83,12 +95,16 @@ def create_custom_event(event_type, params): Args: event_type (str): The type (name) of the custom event. params (dict): Attributes to add to the event. + settings: Optional config settings. + is_ml_event (bool): Boolean indicating whether create_custom_event was called from + record_ml_event for truncation purposes Returns: Custom event (list of 2 dicts), if successful. None, if not successful. """ + settings = settings or global_settings() name = process_event_type(event_type) @@ -99,23 +115,38 @@ def create_custom_event(event_type, params): try: for k, v in params.items(): - key, value = process_user_attribute(k, v) + if is_ml_event: + max_length = MAX_ML_ATTRIBUTE_LENGTH + max_num_attrs = MAX_NUM_ML_USER_ATTRIBUTES + else: + max_length = ( + settings.custom_insights_events.max_attribute_value + if not (NO_LIMIT_LLM_EVENT_TYPE.get(name) == k) + else None + ) + max_num_attrs = MAX_NUM_USER_ATTRIBUTES + key, value = process_user_attribute(k, v, max_length=max_length) if key: - if len(attributes) >= MAX_NUM_USER_ATTRIBUTES: - _logger.debug('Maximum number of attributes already ' - 'added to event %r. Dropping attribute: %r=%r', - name, key, value) + if len(attributes) >= max_num_attrs: + _logger.debug( + "Maximum number of attributes already added to event %r. Dropping attribute: %r=%r", + name, + key, + value, + ) else: attributes[key] = value except Exception: - _logger.debug('Attributes failed to validate for unknown reason. ' - 'Check traceback for clues. Dropping event: %r.', name, - exc_info=True) + _logger.debug( + "Attributes failed to validate for unknown reason. Check traceback for clues. 
Dropping event: %r.", + name, + exc_info=True, + ) return None intrinsics = { - 'type': name, - 'timestamp': int(1000.0 * time.time()), + "type": name, + "timestamp": int(1000.0 * time.time()), } event = [intrinsics, attributes] diff --git a/newrelic/core/otlp_utils.py b/newrelic/core/otlp_utils.py index e78a63603..0719fed33 100644 --- a/newrelic/core/otlp_utils.py +++ b/newrelic/core/otlp_utils.py @@ -21,6 +21,7 @@ import logging +from newrelic.api.time_trace import get_service_linking_metadata from newrelic.common.encoding_utils import json_encode from newrelic.core.config import global_settings from newrelic.core.stats_engine import CountStats, TimeStats @@ -124,8 +125,11 @@ def create_key_values_from_iterable(iterable): ) -def create_resource(attributes=None): +def create_resource(attributes=None, attach_apm_entity=True): attributes = attributes or {"instrumentation.provider": "newrelic-opentelemetry-python-ml"} + if attach_apm_entity: + metadata = get_service_linking_metadata() + attributes.update(metadata) return Resource(attributes=create_key_values_from_iterable(attributes)) @@ -203,7 +207,7 @@ def stats_to_otlp_metrics(metric_data, start_time, end_time): def encode_metric_data(metric_data, start_time, end_time, resource=None, scope=None): - resource = resource or create_resource() + resource = resource or create_resource(attach_apm_entity=False) return MetricsData( resource_metrics=[ ResourceMetrics( @@ -220,24 +224,45 @@ def encode_metric_data(metric_data, start_time, end_time, resource=None, scope=N def encode_ml_event_data(custom_event_data, agent_run_id): - resource = create_resource() - ml_events = [] + # An InferenceEvent is attached to a separate ML Model entity instead + # of the APM entity. + ml_inference_events = [] + ml_apm_events = [] for event in custom_event_data: event_info, event_attrs = event + event_type = event_info["type"] event_attrs.update( { "real_agent_id": agent_run_id, "event.domain": "newrelic.ml_events", - "event.name": event_info["type"], + "event.name": event_type, } ) ml_attrs = create_key_values_from_iterable(event_attrs) unix_nano_timestamp = event_info["timestamp"] * 1e6 - ml_events.append( - { - "time_unix_nano": int(unix_nano_timestamp), - "attributes": ml_attrs, - } + if event_type == "InferenceEvent": + ml_inference_events.append( + { + "time_unix_nano": int(unix_nano_timestamp), + "attributes": ml_attrs, + } + ) + else: + ml_apm_events.append( + { + "time_unix_nano": int(unix_nano_timestamp), + "attributes": ml_attrs, + } + ) + + resource_logs = [] + if ml_inference_events: + inference_resource = create_resource(attach_apm_entity=False) + resource_logs.append( + ResourceLogs(resource=inference_resource, scope_logs=[ScopeLogs(log_records=ml_inference_events)]) ) + if ml_apm_events: + apm_resource = create_resource() + resource_logs.append(ResourceLogs(resource=apm_resource, scope_logs=[ScopeLogs(log_records=ml_apm_events)])) - return LogsData(resource_logs=[ResourceLogs(resource=resource, scope_logs=[ScopeLogs(log_records=ml_events)])]) + return LogsData(resource_logs=resource_logs) diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index aef6b4408..42a785d4a 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -725,6 +725,11 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, module, name, fullnames, message_raw = parse_exc_info(error) fullname = fullnames[0] + # In the case case of JSON formatting for OpenAI models + # this will result in a "cleaner" message 
format + if getattr(value, "_nr_message", None): + message_raw = value._nr_message + # Check to see if we need to strip the message before recording it. if settings.strip_exception_messages.enabled and fullname not in settings.strip_exception_messages.allowlist: diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 2f2b8a113..cac3ad287 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -12,10 +12,41 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json +import logging +import sys +import traceback +import uuid +from io import BytesIO + +from botocore.response import StreamingBody + from newrelic.api.datastore_trace import datastore_trace from newrelic.api.external_trace import ExternalTrace +from newrelic.api.function_trace import FunctionTrace from newrelic.api.message_trace import message_trace -from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.api.transaction import current_transaction +from newrelic.common.object_wrapper import ( + ObjectProxy, + function_wrapper, + wrap_function_wrapper, +) +from newrelic.common.package_version_utils import get_package_version +from newrelic.core.config import global_settings + +BOTOCORE_VERSION = get_package_version("botocore") + + +_logger = logging.getLogger(__name__) + +EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: While reporting an exception in botocore, another exception occurred. Report this issue to New Relic Support.\n%s" +REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to extract request information. Report this issue to New Relic Support.\n%s" +RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to extract response information. If the issue persists, report this issue to New Relic support.\n%s" +RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to report response data. Report this issue to New Relic Support.\n%s" +EMBEDDING_STREAMING_UNSUPPORTED_LOG_MESSAGE = "Response streaming with embedding models is unsupported in botocore instrumentation for AWS Bedrock. If this feature is now supported by AWS and botocore, report this issue to New Relic Support." 
+ +UNSUPPORTED_MODEL_WARNING_SENT = False def extract_sqs(*args, **kwargs): @@ -40,6 +71,706 @@ def extractor_string(*args, **kwargs): return extractor_list +def bedrock_error_attributes(exception, bedrock_attrs): + response = getattr(exception, "response", None) + if not response: + return bedrock_attrs + + response_metadata = response.get("ResponseMetadata", {}) + response_error = response.get("Error", {}) + bedrock_attrs.update( + { + "request_id": response_metadata.get("RequestId"), + "http.statusCode": response_metadata.get("HTTPStatusCode"), + "error.message": response_error.get("Message"), + "error.code": response_error.get("Code"), + "error": True, + } + ) + return bedrock_attrs + + +def create_chat_completion_message_event( + transaction, + input_message_list, + output_message_list, + chat_completion_id, + span_id, + trace_id, + request_model, + request_id, + llm_metadata_dict, + response_id=None, +): + if not transaction: + return + + settings = transaction.settings if transaction.settings is not None else global_settings() + + for index, message in enumerate(input_message_list): + content = message.get("content", "") + + if response_id: + id_ = "%s-%d" % (response_id, index) # Response ID was set, append message index to it. + else: + id_ = str(uuid.uuid4()) # No response IDs, use random UUID + + chat_completion_message_dict = { + "id": id_, + "request_id": request_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(request_model, content) + if settings.ai_monitoring.llm_token_count_callback + else None, + "role": message.get("role"), + "completion_id": chat_completion_id, + "sequence": index, + "response.model": request_model, + "vendor": "bedrock", + "ingest_source": "Python", + } + + if settings.ai_monitoring.record_content.enabled: + chat_completion_message_dict["content"] = content + + chat_completion_message_dict.update(llm_metadata_dict) + + transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_message_dict) + + for index, message in enumerate(output_message_list): + index += len(input_message_list) + content = message.get("content", "") + + if response_id: + id_ = "%s-%d" % (response_id, index) # Response ID was set, append message index to it. 
+ else: + id_ = str(uuid.uuid4()) # No response IDs, use random UUID + + chat_completion_message_dict = { + "id": id_, + "request_id": request_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(request_model, content) + if settings.ai_monitoring.llm_token_count_callback + else None, + "role": message.get("role"), + "completion_id": chat_completion_id, + "sequence": index, + "response.model": request_model, + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + } + + if settings.ai_monitoring.record_content.enabled: + chat_completion_message_dict["content"] = content + + chat_completion_message_dict.update(llm_metadata_dict) + + transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_message_dict) + + +def extract_bedrock_titan_text_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + request_config = request_body.get("textGenerationConfig", {}) + + input_message_list = [{"role": "user", "content": request_body.get("inputText")}] + + bedrock_attrs["input_message_list"] = input_message_list + bedrock_attrs["request.max_tokens"] = request_config.get("maxTokenCount") + bedrock_attrs["request.temperature"] = request_config.get("temperature") + + return bedrock_attrs + + +def extract_bedrock_titan_text_model_response(response_body, bedrock_attrs): + if response_body: + response_body = json.loads(response_body) + + output_message_list = [ + {"role": "assistant", "content": result["outputText"]} for result in response_body.get("results", []) + ] + + bedrock_attrs["response.choices.finish_reason"] = response_body["results"][0]["completionReason"] + bedrock_attrs["output_message_list"] = output_message_list + + return bedrock_attrs + + +def extract_bedrock_titan_text_model_streaming_response(response_body, bedrock_attrs): + if response_body: + if "outputText" in response_body: + bedrock_attrs["output_message_list"] = messages = bedrock_attrs.get("output_message_list", []) + messages.append({"role": "assistant", "content": response_body["outputText"]}) + + bedrock_attrs["response.choices.finish_reason"] = response_body.get("completionReason", None) + + return bedrock_attrs + + +def extract_bedrock_titan_embedding_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + + bedrock_attrs["input"] = request_body.get("inputText") + + return bedrock_attrs + + +def extract_bedrock_cohere_embedding_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + + bedrock_attrs["input"] = request_body.get("texts") + + return bedrock_attrs + + +def extract_bedrock_ai21_j2_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + + input_message_list = [{"role": "user", "content": request_body.get("prompt")}] + + bedrock_attrs["request.max_tokens"] = request_body.get("maxTokens") + bedrock_attrs["request.temperature"] = request_body.get("temperature") + bedrock_attrs["input_message_list"] = input_message_list + + return bedrock_attrs + + +def extract_bedrock_ai21_j2_model_response(response_body, bedrock_attrs): + if response_body: + response_body = json.loads(response_body) + output_message_list = [ + {"role": "assistant", "content": result["data"]["text"]} for result in response_body.get("completions", []) + ] + + bedrock_attrs["response.choices.finish_reason"] = response_body["completions"][0]["finishReason"]["reason"] + bedrock_attrs["output_message_list"] = output_message_list + 
bedrock_attrs["response_id"] = str(response_body.get("id")) + + return bedrock_attrs + + +def extract_bedrock_claude_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + + if "messages" in request_body: + input_message_list = [ + {"role": message.get("role", "user"), "content": message.get("content")} + for message in request_body.get("messages") + ] + else: + input_message_list = [{"role": "user", "content": request_body.get("prompt")}] + + bedrock_attrs["request.max_tokens"] = request_body.get("max_tokens_to_sample") + bedrock_attrs["request.temperature"] = request_body.get("temperature") + bedrock_attrs["input_message_list"] = input_message_list + + return bedrock_attrs + + +def extract_bedrock_claude_model_response(response_body, bedrock_attrs): + if response_body: + response_body = json.loads(response_body) + role = response_body.get("role", "assistant") + content = response_body.get("content") or response_body.get("completion") + output_message_list = [{"role": role, "content": content}] + + bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason") + bedrock_attrs["output_message_list"] = output_message_list + + return bedrock_attrs + + +def extract_bedrock_claude_model_streaming_response(response_body, bedrock_attrs): + if response_body: + content = response_body.get("completion", "") or (response_body.get("delta") or {}).get("text", "") + if "output_message_list" not in bedrock_attrs: + bedrock_attrs["output_message_list"] = [{"role": "assistant", "content": ""}] + bedrock_attrs["output_message_list"][0]["content"] += content + bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason") + return bedrock_attrs + + +def extract_bedrock_llama_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + + input_message_list = [{"role": "user", "content": request_body.get("prompt")}] + + bedrock_attrs["request.max_tokens"] = request_body.get("max_gen_len") + bedrock_attrs["request.temperature"] = request_body.get("temperature") + bedrock_attrs["input_message_list"] = input_message_list + + return bedrock_attrs + + +def extract_bedrock_llama_model_response(response_body, bedrock_attrs): + if response_body: + response_body = json.loads(response_body) + + output_message_list = [{"role": "assistant", "content": response_body.get("generation")}] + bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason") + bedrock_attrs["output_message_list"] = output_message_list + + return bedrock_attrs + + +def extract_bedrock_llama_model_streaming_response(response_body, bedrock_attrs): + if response_body: + content = response_body.get("generation") + if "output_message_list" not in bedrock_attrs: + bedrock_attrs["output_message_list"] = [{"role": "assistant", "content": ""}] + bedrock_attrs["output_message_list"][0]["content"] += content + bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason") + return bedrock_attrs + + +def extract_bedrock_cohere_model_request(request_body, bedrock_attrs): + request_body = json.loads(request_body) + + input_message_list = [{"role": "user", "content": request_body.get("prompt")}] + + bedrock_attrs["request.max_tokens"] = request_body.get("max_tokens") + bedrock_attrs["request.temperature"] = request_body.get("temperature") + bedrock_attrs["input_message_list"] = input_message_list + + return bedrock_attrs + + +def extract_bedrock_cohere_model_response(response_body, bedrock_attrs): + if response_body: + 
response_body = json.loads(response_body) + + output_message_list = [ + {"role": "assistant", "content": result["text"]} for result in response_body.get("generations", []) + ] + + bedrock_attrs["response.choices.finish_reason"] = response_body["generations"][0]["finish_reason"] + bedrock_attrs["output_message_list"] = output_message_list + bedrock_attrs["response_id"] = str(response_body.get("id")) + + return bedrock_attrs + + +def extract_bedrock_cohere_model_streaming_response(response_body, bedrock_attrs): + if response_body: + bedrock_attrs["output_message_list"] = messages = bedrock_attrs.get("output_message_list", []) + messages.extend( + [{"role": "assistant", "content": result["text"]} for result in response_body.get("generations", [])] + ) + + bedrock_attrs["response.choices.finish_reason"] = response_body["generations"][0]["finish_reason"] + bedrock_attrs["response_id"] = str(response_body.get("id")) + + return bedrock_attrs + + +NULL_EXTRACTOR = lambda *args: {} # Empty extractor that returns nothing +MODEL_EXTRACTORS = [ # Order is important here, avoiding dictionaries + ( + "amazon.titan-embed", + extract_bedrock_titan_embedding_model_request, + NULL_EXTRACTOR, + NULL_EXTRACTOR, + ), + ( + "cohere.embed", + extract_bedrock_cohere_embedding_model_request, + NULL_EXTRACTOR, + NULL_EXTRACTOR, + ), + ( + "amazon.titan", + extract_bedrock_titan_text_model_request, + extract_bedrock_titan_text_model_response, + extract_bedrock_titan_text_model_streaming_response, + ), + ("ai21.j2", extract_bedrock_ai21_j2_model_request, extract_bedrock_ai21_j2_model_response, NULL_EXTRACTOR), + ( + "cohere", + extract_bedrock_cohere_model_request, + extract_bedrock_cohere_model_response, + extract_bedrock_cohere_model_streaming_response, + ), + ( + "anthropic.claude", + extract_bedrock_claude_model_request, + extract_bedrock_claude_model_response, + extract_bedrock_claude_model_streaming_response, + ), + ( + "meta.llama2", + extract_bedrock_llama_model_request, + extract_bedrock_llama_model_response, + extract_bedrock_llama_model_streaming_response, + ), +] + + +def wrap_bedrock_runtime_invoke_model(response_streaming=False): + @function_wrapper + def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): + # Wrapped function only takes keyword arguments, no need for binding + transaction = current_transaction() + + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + transaction.add_ml_model_info("Bedrock", BOTOCORE_VERSION) + transaction._add_agent_attribute("llm", True) + + # Read and replace request file stream bodies + request_body = kwargs["body"] + if hasattr(request_body, "read"): + request_body = request_body.read() + kwargs["body"] = request_body + + # Determine model to be used with extractor + model = kwargs.get("modelId") + if not model: + return wrapped(*args, **kwargs) + + is_embedding = "embed" in model + + # Determine extractor by model type + for extractor_name, request_extractor, response_extractor, stream_extractor in MODEL_EXTRACTORS: + if model.startswith(extractor_name): + break + else: + # Model was not found in extractor list + global UNSUPPORTED_MODEL_WARNING_SENT + if not UNSUPPORTED_MODEL_WARNING_SENT: + # Only send warning once to avoid spam + _logger.warning( + "Unsupported Amazon Bedrock model in use (%s). 
Upgrade to a newer version of the agent, and contact New Relic support if the issue persists.", + model, + ) + UNSUPPORTED_MODEL_WARNING_SENT = True + + request_extractor = response_extractor = stream_extractor = NULL_EXTRACTOR + + function_name = wrapped.__name__ + operation = "embedding" if is_embedding else "completion" + + # Function trace may not be exited in this function in the case of streaming, so start manually + ft = FunctionTrace(name=function_name, group="Llm/%s/Bedrock" % (operation)) + ft.__enter__() + + # Get trace information + available_metadata = get_trace_linking_metadata() + span_id = available_metadata.get("span.id") + trace_id = available_metadata.get("trace.id") + + try: + response = wrapped(*args, **kwargs) + except Exception as exc: + try: + bedrock_attrs = { + "model": model, + "span_id": span_id, + "trace_id": trace_id, + } + try: + request_extractor(request_body, bedrock_attrs) + except json.decoder.JSONDecodeError: + pass + except Exception: + _logger.warning(REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + error_attributes = bedrock_error_attributes(exc, bedrock_attrs) + notice_error_attributes = { + "http.statusCode": error_attributes.get("http.statusCode"), + "error.message": error_attributes.get("error.message"), + "error.code": error_attributes.get("error.code"), + } + + if is_embedding: + notice_error_attributes.update({"embedding_id": str(uuid.uuid4())}) + else: + notice_error_attributes.update({"completion_id": str(uuid.uuid4())}) + + ft.notice_error( + attributes=notice_error_attributes, + ) + + ft.__exit__(*sys.exc_info()) + error_attributes["duration"] = ft.duration + + if operation == "embedding": + handle_embedding_event(transaction, error_attributes) + else: + handle_chat_completion_event(transaction, error_attributes) + except Exception: + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + raise + + if not response or (response_streaming and not settings.ai_monitoring.streaming.enabled): + ft.__exit__(None, None, None) + return response + + if response_streaming and operation == "embedding": + # This combination is not supported at time of writing, but may become + # a supported feature in the future. Instrumentation will need to be written + # if this becomes available. + _logger.warning(EMBEDDING_STREAMING_UNSUPPORTED_LOG_MESSAGE) + ft.__exit__(None, None, None) + return response + + response_headers = response.get("ResponseMetadata", {}).get("HTTPHeaders") or {} + bedrock_attrs = { + "request_id": response_headers.get("x-amzn-requestid"), + "model": model, + "span_id": span_id, + "trace_id": trace_id, + } + + try: + request_extractor(request_body, bedrock_attrs) + except json.decoder.JSONDecodeError: + pass + except Exception: + _logger.warning(REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + try: + if response_streaming: + # Wrap EventStream object here to intercept __iter__ method instead of instrumenting class. + # This class is used in numerous other services in botocore, and would cause conflicts.
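+ # The wrapper also carries the still-open function trace and the attributes
+ # gathered so far; GeneratorProxy reads these back in record_stream_chunk and
+ # record_events_on_stop_iteration so that events are recorded only once the
+ # caller actually consumes the stream.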
+ response["body"] = body = EventStreamWrapper(response["body"]) + body._nr_ft = ft + body._nr_bedrock_attrs = bedrock_attrs + body._nr_model_extractor = stream_extractor + return response + + # Read and replace response streaming bodies + response_body = response["body"].read() + ft.__exit__(None, None, None) + bedrock_attrs["duration"] = ft.duration + response["body"] = StreamingBody(BytesIO(response_body), len(response_body)) + + # Run response extractor for non-streaming responses + try: + response_extractor(response_body, bedrock_attrs) + except Exception: + _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + if operation == "embedding": + handle_embedding_event(transaction, bedrock_attrs) + else: + handle_chat_completion_event(transaction, bedrock_attrs) + + except Exception: + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + return response + + return _wrap_bedrock_runtime_invoke_model + + +class EventStreamWrapper(ObjectProxy): + def __iter__(self): + g = GeneratorProxy(self.__wrapped__.__iter__()) + g._nr_ft = getattr(self, "_nr_ft", None) + g._nr_bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {}) + g._nr_model_extractor = getattr(self, "_nr_model_extractor", NULL_EXTRACTOR) + return g + + +class GeneratorProxy(ObjectProxy): + def __init__(self, wrapped): + super(GeneratorProxy, self).__init__(wrapped) + + def __iter__(self): + return self + + def __next__(self): + transaction = current_transaction() + if not transaction: + return self.__wrapped__.__next__() + + return_val = None + try: + return_val = self.__wrapped__.__next__() + record_stream_chunk(self, return_val, transaction) + except StopIteration: + record_events_on_stop_iteration(self, transaction) + raise + except Exception as exc: + record_error(self, transaction, exc) + raise + return return_val + + def close(self): + return super(GeneratorProxy, self).close() + + +def record_stream_chunk(self, return_val, transaction): + if return_val: + try: + chunk = json.loads(return_val["chunk"]["bytes"].decode("utf-8")) + self._nr_model_extractor(chunk, self._nr_bedrock_attrs) + # In Langchain, the bedrock iterator exits early if type is "content_block_stop". + # So we need to call the record events here since stop iteration will not be raised. + _type = chunk.get("type") + if _type == "content_block_stop": + record_events_on_stop_iteration(self, transaction) + except Exception: + _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def record_events_on_stop_iteration(self, transaction): + if hasattr(self, "_nr_ft"): + bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {}) + self._nr_ft.__exit__(None, None, None) + + # If there are no bedrock attrs exit early as there's no data to record. + if not bedrock_attrs: + return + + try: + bedrock_attrs["duration"] = self._nr_ft.duration + handle_chat_completion_event(transaction, bedrock_attrs) + except Exception: + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + # Clear cached data as this can be very large. + self._nr_bedrock_attrs.clear() + + +def record_error(self, transaction, exc): + if hasattr(self, "_nr_ft"): + try: + ft = self._nr_ft + error_attributes = getattr(self, "_nr_bedrock_attrs", {}) + + # If there are no bedrock attrs exit early as there's no data to record. 
+ if not error_attributes: + return + + error_attributes = bedrock_error_attributes(exc, error_attributes) + notice_error_attributes = { + "http.statusCode": error_attributes.get("http.statusCode"), + "error.message": error_attributes.get("error.message"), + "error.code": error_attributes.get("error.code"), + } + notice_error_attributes.update({"completion_id": str(uuid.uuid4())}) + + ft.notice_error( + attributes=notice_error_attributes, + ) + + ft.__exit__(*sys.exc_info()) + error_attributes["duration"] = ft.duration + + handle_chat_completion_event(transaction, error_attributes) + + # Clear cached data as this can be very large. + error_attributes.clear() + except Exception: + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def handle_embedding_event(transaction, bedrock_attrs): + embedding_id = str(uuid.uuid4()) + + settings = transaction.settings if transaction.settings is not None else global_settings() + + # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events + custom_attrs_dict = transaction._custom_params + llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + + span_id = bedrock_attrs.get("span_id", None) + trace_id = bedrock_attrs.get("trace_id", None) + request_id = bedrock_attrs.get("request_id", None) + model = bedrock_attrs.get("model", None) + input = bedrock_attrs.get("input") + + embedding_dict = { + "vendor": "bedrock", + "ingest_source": "Python", + "id": embedding_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(model, input) + if settings.ai_monitoring.llm_token_count_callback + else None, + "request_id": request_id, + "duration": bedrock_attrs.get("duration", None), + "request.model": model, + "response.model": model, + "error": bedrock_attrs.get("error", None), + } + embedding_dict.update(llm_metadata_dict) + + if settings.ai_monitoring.record_content.enabled: + embedding_dict["input"] = input + + embedding_dict = {k: v for k, v in embedding_dict.items() if v is not None} + transaction.record_custom_event("LlmEmbedding", embedding_dict) + + +def handle_chat_completion_event(transaction, bedrock_attrs): + chat_completion_id = str(uuid.uuid4()) + + # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events + custom_attrs_dict = transaction._custom_params + llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + + span_id = bedrock_attrs.get("span_id", None) + trace_id = bedrock_attrs.get("trace_id", None) + request_id = bedrock_attrs.get("request_id", None) + response_id = bedrock_attrs.get("response_id", None) + model = bedrock_attrs.get("model", None) + + settings = transaction.settings if transaction.settings is not None else global_settings() + + input_message_list = bedrock_attrs.get("input_message_list", []) + output_message_list = bedrock_attrs.get("output_message_list", []) + number_of_messages = ( + len(input_message_list) + len(output_message_list) + ) or None # If 0, attribute will be set to None and removed + + chat_completion_summary_dict = { + "vendor": "bedrock", + "ingest_source": "Python", + "id": chat_completion_id, + "span_id": span_id, + "trace_id": trace_id, + "request_id": request_id, + "response_id": response_id, + "duration": bedrock_attrs.get("duration", None), + "request.max_tokens": bedrock_attrs.get("request.max_tokens", None), + 
"request.temperature": bedrock_attrs.get("request.temperature", None), + "request.model": model, + "response.model": model, # Duplicate data required by the UI + "response.number_of_messages": number_of_messages, + "response.choices.finish_reason": bedrock_attrs.get("response.choices.finish_reason", None), + "error": bedrock_attrs.get("error", None), + } + chat_completion_summary_dict.update(llm_metadata_dict) + chat_completion_summary_dict = {k: v for k, v in chat_completion_summary_dict.items() if v is not None} + + transaction.record_custom_event("LlmChatCompletionSummary", chat_completion_summary_dict) + + create_chat_completion_message_event( + transaction=transaction, + input_message_list=input_message_list, + output_message_list=output_message_list, + chat_completion_id=chat_completion_id, + span_id=span_id, + trace_id=trace_id, + request_model=model, + request_id=request_id, + llm_metadata_dict=llm_metadata_dict, + response_id=response_id, + ) + + CUSTOM_TRACE_POINTS = { ("sns", "publish"): message_trace("SNS", "Produce", "Topic", extract(("TopicArn", "TargetArn"), "PhoneNumber")), ("dynamodb", "put_item"): datastore_trace("DynamoDB", extract("TableName"), "put_item"), @@ -53,6 +784,10 @@ def extractor_string(*args, **kwargs): ("sqs", "send_message"): message_trace("SQS", "Produce", "Queue", extract_sqs), ("sqs", "send_message_batch"): message_trace("SQS", "Produce", "Queue", extract_sqs), ("sqs", "receive_message"): message_trace("SQS", "Consume", "Queue", extract_sqs), + ("bedrock-runtime", "invoke_model"): wrap_bedrock_runtime_invoke_model(response_streaming=False), + ("bedrock-runtime", "invoke_model_with_response_stream"): wrap_bedrock_runtime_invoke_model( + response_streaming=True + ), } @@ -74,14 +809,20 @@ def _nr_clientcreator__create_api_method_(wrapped, instance, args, kwargs): return tracer(wrapped) +def _nr_clientcreator__create_methods(wrapped, instance, args, kwargs): + class_attributes = wrapped(*args, **kwargs) + class_attributes["_nr_wrapped"] = True + return class_attributes + + def _bind_make_request_params(operation_model, request_dict, *args, **kwargs): return operation_model, request_dict def _nr_endpoint_make_request_(wrapped, instance, args, kwargs): operation_model, request_dict = _bind_make_request_params(*args, **kwargs) - url = request_dict.get("url", "") - method = request_dict.get("method", None) + url = request_dict.get("url") + method = request_dict.get("method") with ExternalTrace(library="botocore", url=url, method=method, source=wrapped) as trace: try: @@ -104,3 +845,4 @@ def instrument_botocore_endpoint(module): def instrument_botocore_client(module): wrap_function_wrapper(module, "ClientCreator._create_api_method", _nr_clientcreator__create_api_method_) + wrap_function_wrapper(module, "ClientCreator._create_methods", _nr_clientcreator__create_methods) diff --git a/newrelic/hooks/external_feedparser.py b/newrelic/hooks/external_feedparser.py index 1d2003eb2..277c872a3 100644 --- a/newrelic/hooks/external_feedparser.py +++ b/newrelic/hooks/external_feedparser.py @@ -22,12 +22,12 @@ import newrelic.common.object_wrapper import newrelic.api.external_trace -class capture_external_trace(object): +class capture_external_trace(object): def __init__(self, wrapped): newrelic.api.object_wrapper.update_wrapper(self, wrapped) self._nr_next_object = wrapped - if not hasattr(self, '_nr_last_object'): + if not hasattr(self, "_nr_last_object"): self._nr_last_object = wrapped def __call__(self, url, *args, **kwargs): @@ -44,16 +44,15 @@ def __call__(self, url, 
*args, **kwargs): parsed_url = url - if parsed_url.startswith('feed:http'): + if parsed_url.startswith("feed:http"): parsed_url = parsed_url[5:] - elif parsed_url.startswith('feed:'): - parsed_url = 'http:' + url[5:] + elif parsed_url.startswith("feed:"): + parsed_url = "http:" + url[5:] - if parsed_url.split(':')[0].lower() in ['http', 'https', 'ftp']: + if parsed_url.split(":")[0].lower() in ["http", "https", "ftp"]: current_transaction = newrelic.api.transaction.current_transaction() if current_transaction: - trace = newrelic.api.external_trace.ExternalTrace( - 'feedparser', parsed_url, 'GET') + trace = newrelic.api.external_trace.ExternalTrace("feedparser", parsed_url, "GET") context_manager = trace.__enter__() try: result = self._nr_next_object(url, *args, **kwargs) @@ -68,8 +67,8 @@ def __call__(self, url, *args, **kwargs): return self._nr_next_object(url, *args, **kwargs) def __getattr__(self, name): - return getattr(self._nr_next_object, name) + return getattr(self._nr_next_object, name) + def instrument(module): - newrelic.common.object_wrapper.wrap_object( - module, 'parse', capture_external_trace) + newrelic.common.object_wrapper.wrap_object(module, "parse", capture_external_trace) diff --git a/newrelic/hooks/framework_webpy.py b/newrelic/hooks/framework_webpy.py index 3c15bd1a7..717610ac9 100644 --- a/newrelic/hooks/framework_webpy.py +++ b/newrelic/hooks/framework_webpy.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - import newrelic.packages.six as six import newrelic.api.transaction @@ -25,6 +23,7 @@ from newrelic.api.wsgi_application import WSGIApplicationWrapper from newrelic.api.time_trace import notice_error + def transaction_name_delegate(*args, **kwargs): transaction = newrelic.api.transaction.current_transaction() if transaction: @@ -35,24 +34,23 @@ def transaction_name_delegate(*args, **kwargs): transaction.set_transaction_name(f) return (args, kwargs) + def wrap_handle_exception(self): transaction = newrelic.api.transaction.current_transaction() if transaction: notice_error() + def template_name(render_obj, name): return name + def instrument(module): - if module.__name__ == 'web.application': - newrelic.api.out_function.wrap_out_function( - module, 'application.wsgifunc', WSGIApplicationWrapper) - newrelic.api.in_function.wrap_in_function( - module, 'application._delegate', transaction_name_delegate) - newrelic.api.pre_function.wrap_pre_function( - module, 'application.internalerror', wrap_handle_exception) - - elif module.__name__ == 'web.template': - newrelic.api.function_trace.wrap_function_trace( - module, 'render.__getattr__', template_name, 'Template/Render') + if module.__name__ == "web.application": + newrelic.api.out_function.wrap_out_function(module, "application.wsgifunc", WSGIApplicationWrapper) + newrelic.api.in_function.wrap_in_function(module, "application._delegate", transaction_name_delegate) + newrelic.api.pre_function.wrap_pre_function(module, "application.internalerror", wrap_handle_exception) + + elif module.__name__ == "web.template": + newrelic.api.function_trace.wrap_function_trace(module, "render.__getattr__", template_name, "Template/Render") diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py new file mode 100644 index 000000000..941720bc1 --- /dev/null +++ b/newrelic/hooks/mlmodel_langchain.py @@ -0,0 +1,868 @@ +# Copyright 2010 New Relic, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sys +import traceback +import uuid + +from newrelic.api.function_trace import FunctionTrace +from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.api.transaction import current_transaction +from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.common.package_version_utils import get_package_version +from newrelic.common.signature import bind_args +from newrelic.core.config import global_settings + +_logger = logging.getLogger(__name__) +LANGCHAIN_VERSION = get_package_version("langchain") +EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE = "Exception occurred in langchain instrumentation: While reporting an exception in langchain, another exception occurred. Report this issue to New Relic Support.\n%s" +RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in langchain instrumentation: Failed to record LLM events. Report this issue to New Relic Support.\n%s" + +VECTORSTORE_CLASSES = { + "langchain_community.vectorstores.alibabacloud_opensearch": "AlibabaCloudOpenSearch", + "langchain_community.vectorstores.analyticdb": "AnalyticDB", + "langchain_community.vectorstores.annoy": "Annoy", + "langchain_community.vectorstores.apache_doris": "ApacheDoris", + "langchain_community.vectorstores.astradb": "AstraDB", + "langchain_community.vectorstores.atlas": "AtlasDB", + "langchain_community.vectorstores.awadb": "AwaDB", + "langchain_community.vectorstores.azure_cosmos_db": "AzureCosmosDBVectorSearch", + "langchain_community.vectorstores.azuresearch": "AzureSearch", + "langchain_community.vectorstores.baiduvectordb": "BaiduVectorDB", + "langchain_community.vectorstores.bageldb": "Bagel", + "langchain_community.vectorstores.baiducloud_vector_search": "BESVectorStore", + "langchain_community.vectorstores.bigquery_vector_search": "BigQueryVectorSearch", + "langchain_community.vectorstores.cassandra": "Cassandra", + "langchain_community.vectorstores.chroma": "Chroma", + "langchain_community.vectorstores.clarifai": "Clarifai", + "langchain_community.vectorstores.clickhouse": "Clickhouse", + "langchain_community.vectorstores.couchbase": "CouchbaseVectorStore", + "langchain_community.vectorstores.dashvector": "DashVector", + "langchain_community.vectorstores.databricks_vector_search": "DatabricksVectorSearch", + "langchain_community.vectorstores.deeplake": "DeepLake", + "langchain_community.vectorstores.dingo": "Dingo", + "langchain_community.vectorstores.documentdb": "DocumentDBVectorSearch", + "langchain_community.vectorstores.elastic_vector_search": "ElasticVectorSearch", + # "langchain_community.vectorstores.elastic_vector_search": "ElasticKnnSearch", # Deprecated + "langchain_community.vectorstores.elasticsearch": "ElasticsearchStore", + "langchain_community.vectorstores.epsilla": "Epsilla", + "langchain_community.vectorstores.faiss": "FAISS", + "langchain_community.vectorstores.hanavector": "HanaDB", + "langchain_community.vectorstores.hippo": "Hippo", + 
"langchain_community.vectorstores.hologres": "Hologres", + "langchain_community.vectorstores.infinispanvs": "InfinispanVS", + "langchain_community.vectorstores.kdbai": "KDBAI", + "langchain_community.vectorstores.kinetica": "Kinetica", + "langchain_community.vectorstores.lancedb": "LanceDB", + "langchain_community.vectorstores.lantern": "Lantern", + "langchain_community.vectorstores.llm_rails": "LLMRails", + "langchain_community.vectorstores.marqo": "Marqo", + "langchain_community.vectorstores.matching_engine": "MatchingEngine", + "langchain_community.vectorstores.meilisearch": "Meilisearch", + "langchain_community.vectorstores.milvus": "Milvus", + "langchain_community.vectorstores.momento_vector_index": "MomentoVectorIndex", + "langchain_community.vectorstores.mongodb_atlas": "MongoDBAtlasVectorSearch", + "langchain_community.vectorstores.myscale": "MyScale", + "langchain_community.vectorstores.neo4j_vector": "Neo4jVector", + "langchain_community.vectorstores.thirdai_neuraldb": "NeuralDBVectorStore", + "langchain_community.vectorstores.nucliadb": "NucliaDB", + "langchain_community.vectorstores.opensearch_vector_search": "OpenSearchVectorSearch", + "langchain_community.vectorstores.pgembedding": "PGEmbedding", + "langchain_community.vectorstores.pgvecto_rs": "PGVecto_rs", + "langchain_community.vectorstores.pgvector": "PGVector", + "langchain_community.vectorstores.pinecone": "Pinecone", + "langchain_community.vectorstores.qdrant": "Qdrant", + "langchain_community.vectorstores.redis.base": "Redis", + "langchain_community.vectorstores.rocksetdb": "Rockset", + "langchain_community.vectorstores.scann": "ScaNN", + "langchain_community.vectorstores.semadb": "SemaDB", + "langchain_community.vectorstores.singlestoredb": "SingleStoreDB", + "langchain_community.vectorstores.sklearn": "SKLearnVectorStore", + "langchain_community.vectorstores.sqlitevss": "SQLiteVSS", + "langchain_community.vectorstores.starrocks": "StarRocks", + "langchain_community.vectorstores.supabase": "SupabaseVectorStore", + "langchain_community.vectorstores.surrealdb": "SurrealDBStore", + "langchain_community.vectorstores.tair": "Tair", + "langchain_community.vectorstores.tencentvectordb": "TencentVectorDB", + "langchain_community.vectorstores.tidb_vector": "TiDBVectorStore", + "langchain_community.vectorstores.tigris": "Tigris", + "langchain_community.vectorstores.tiledb": "TileDB", + "langchain_community.vectorstores.timescalevector": "TimescaleVector", + "langchain_community.vectorstores.typesense": "Typesense", + "langchain_community.vectorstores.usearch": "USearch", + "langchain_community.vectorstores.vald": "Vald", + "langchain_community.vectorstores.vearch": "Vearch", + "langchain_community.vectorstores.vectara": "Vectara", + "langchain_community.vectorstores.vespa": "VespaStore", + "langchain_community.vectorstores.weaviate": "Weaviate", + "langchain_community.vectorstores.xata": "XataVectorStore", + "langchain_community.vectorstores.yellowbrick": "Yellowbrick", + "langchain_community.vectorstores.zep": "ZepVectorStore", + "langchain_community.vectorstores.docarray.hnsw": "DocArrayHnswSearch", + "langchain_community.vectorstores.docarray.in_memory": "DocArrayInMemorySearch", +} + + +def _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata): + settings = transaction.settings if transaction.settings is not None else global_settings() + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + request_query, request_k = bind_similarity_search(*args, 
**kwargs) + llm_metadata_dict = _get_llm_metadata(transaction) + vectorstore_error_dict = { + "request.k": request_k, + "id": search_id, + "span_id": span_id, + "trace_id": trace_id, + "vendor": "langchain", + "ingest_source": "Python", + "error": True, + } + + if settings.ai_monitoring.record_content.enabled: + vectorstore_error_dict["request.query"] = request_query + + vectorstore_error_dict.update(llm_metadata_dict) + transaction.record_custom_event("LlmVectorSearch", vectorstore_error_dict) + + +async def wrap_asimilarity_search(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + transaction.add_ml_model_info("Langchain", LANGCHAIN_VERSION) + transaction._add_agent_attribute("llm", True) + + search_id = str(uuid.uuid4()) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/vectorstore/Langchain") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + response = await wrapped(*args, **kwargs) + except Exception as exc: + ft.notice_error(attributes={"vector_store_id": search_id}) + ft.__exit__(*sys.exc_info()) + _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata) + raise + ft.__exit__(None, None, None) + + if not response: + return response + + _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response) + return response + + +def bind_similarity_search(query, k, *args, **kwargs): + return query, k + + +def wrap_similarity_search(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + transaction.add_ml_model_info("Langchain", LANGCHAIN_VERSION) + transaction._add_agent_attribute("llm", True) + + search_id = str(uuid.uuid4()) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/vectorstore/Langchain") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + response = wrapped(*args, **kwargs) + except Exception as exc: + ft.notice_error(attributes={"vector_store_id": search_id}) + ft.__exit__(*sys.exc_info()) + _create_error_vectorstore_events(transaction, search_id, args, kwargs, linking_metadata) + raise + ft.__exit__(None, None, None) + + if not response: + return response + + _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response) + return response + + +def _record_vector_search_success(transaction, linking_metadata, ft, search_id, args, kwargs, response): + settings = transaction.settings if transaction.settings is not None else global_settings() + request_query, request_k = bind_similarity_search(*args, **kwargs) + duration = ft.duration + response_number_of_documents = len(response) + llm_metadata_dict = _get_llm_metadata(transaction) + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + + llm_vector_search = { + "request.k": request_k, + "duration": duration, + "response.number_of_documents": response_number_of_documents, + "span_id": span_id, + "trace_id": trace_id, + "id": search_id, + "vendor": "langchain", + "ingest_source": "Python", + } + + if 
settings.ai_monitoring.record_content.enabled: + llm_vector_search["request.query"] = request_query + + llm_vector_search.update(llm_metadata_dict) + transaction.record_custom_event("LlmVectorSearch", llm_vector_search) + + for index, doc in enumerate(response): + sequence = index + page_content = getattr(doc, "page_content") + metadata = getattr(doc, "metadata") or {} + + metadata_dict = {"metadata.%s" % key: value for key, value in metadata.items()} + + llm_vector_search_result = { + "id": str(uuid.uuid4()), + "search_id": search_id, + "sequence": sequence, + "span_id": span_id, + "trace_id": trace_id, + "vendor": "langchain", + "ingest_source": "Python", + } + + if settings.ai_monitoring.record_content.enabled: + llm_vector_search_result["page_content"] = page_content + llm_vector_search_result.update(metadata_dict) + llm_vector_search_result.update(llm_metadata_dict) + transaction.record_custom_event("LlmVectorSearchResult", llm_vector_search_result) + + +def wrap_tool_sync_run(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Langchain", LANGCHAIN_VERSION) + transaction._add_agent_attribute("llm", True) + + tool_id, metadata, tags, tool_input, tool_name, tool_description, run_args = _capture_tool_info( + instance, wrapped, args, kwargs + ) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/tool/Langchain") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + return_val = wrapped(**run_args) + except Exception as exc: + _record_tool_error( + instance, + transaction, + linking_metadata, + tags, + metadata, + tool_id, + tool_input, + tool_name, + tool_description, + ft, + ) + raise + ft.__exit__(None, None, None) + + if not return_val: + return return_val + + _record_tool_success( + instance, + transaction, + linking_metadata, + tags, + metadata, + tool_id, + tool_input, + tool_name, + tool_description, + ft, + return_val, + ) + return return_val + + +async def wrap_tool_async_run(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Langchain", LANGCHAIN_VERSION) + transaction._add_agent_attribute("llm", True) + + tool_id, metadata, tags, tool_input, tool_name, tool_description, run_args = _capture_tool_info( + instance, wrapped, args, kwargs + ) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/tool/Langchain") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + return_val = await wrapped(**run_args) + except Exception as exc: + _record_tool_error( + instance, + transaction, + linking_metadata, + tags, + metadata, + tool_id, + tool_input, + tool_name, + tool_description, + ft, + ) + raise + ft.__exit__(None, None, None) + + if not return_val: + return return_val + + _record_tool_success( + instance, + transaction, + linking_metadata, + tags, + metadata, + tool_id, + tool_input, + tool_name, + tool_description, + ft, + return_val, + ) + return 
return_val + + +def _capture_tool_info(instance, wrapped, args, kwargs): + run_args = bind_args(wrapped, args, kwargs) + + tool_id = str(uuid.uuid4()) + metadata = run_args.get("metadata") or {} + metadata["nr_tool_id"] = tool_id + run_args["metadata"] = metadata + tags = run_args.get("tags") or [] + tool_input = run_args.get("tool_input") + tool_name = getattr(instance, "name", None) + tool_description = getattr(instance, "description", None) + return tool_id, metadata, tags, tool_input, tool_name, tool_description, run_args + + +def _record_tool_success( + instance, + transaction, + linking_metadata, + tags, + metadata, + tool_id, + tool_input, + tool_name, + tool_description, + ft, + response, +): + settings = transaction.settings if transaction.settings is not None else global_settings() + run_id = getattr(transaction, "_nr_tool_run_ids", {}).pop(tool_id, None) + # Update tags and metadata previously obtained from run_args with instance values + metadata.update(getattr(instance, "metadata", None) or {}) + tags.extend(getattr(instance, "tags", None) or []) + full_tool_event_dict = {"metadata.%s" % key: value for key, value in metadata.items() if key != "nr_tool_id"} + full_tool_event_dict.update( + { + "id": tool_id, + "run_id": run_id, + "name": tool_name, + "description": tool_description, + "span_id": linking_metadata.get("span.id"), + "trace_id": linking_metadata.get("trace.id"), + "vendor": "langchain", + "ingest_source": "Python", + "duration": ft.duration, + "tags": tags or None, + } + ) + result = None + try: + result = str(response) + except Exception: + _logger.debug( + "Failed to convert tool response into a string.\n%s" % traceback.format_exception(*sys.exc_info()) + ) + if settings.ai_monitoring.record_content.enabled: + full_tool_event_dict.update( + { + "input": tool_input, + "output": result, + } + ) + full_tool_event_dict.update(_get_llm_metadata(transaction)) + transaction.record_custom_event("LlmTool", full_tool_event_dict) + + +def _record_tool_error( + instance, transaction, linking_metadata, tags, metadata, tool_id, tool_input, tool_name, tool_description, ft +): + settings = transaction.settings if transaction.settings is not None else global_settings() + ft.notice_error( + attributes={ + "tool_id": tool_id, + } + ) + ft.__exit__(*sys.exc_info()) + run_id = getattr(transaction, "_nr_tool_run_ids", {}).pop(tool_id, None) + # Update tags and metadata previously obtained from run_args with instance values + metadata.update(getattr(instance, "metadata", None) or {}) + tags.extend(getattr(instance, "tags", None) or []) + + # Make sure the builtin attributes take precedence over metadata attributes. 
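+ # (The comprehension writes the metadata.* keys first; the update() below then
+ # overwrites any colliding keys with the agent-defined attributes.)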
+ error_tool_event_dict = {"metadata.%s" % key: value for key, value in metadata.items() if key != "nr_tool_id"} + error_tool_event_dict.update( + { + "id": tool_id, + "run_id": run_id, + "name": tool_name, + "description": tool_description, + "span_id": linking_metadata.get("span.id"), + "trace_id": linking_metadata.get("trace.id"), + "vendor": "langchain", + "ingest_source": "Python", + "duration": ft.duration, + "tags": tags or None, + "error": True, + } + ) + if settings.ai_monitoring.record_content.enabled: + error_tool_event_dict["input"] = tool_input + error_tool_event_dict.update(_get_llm_metadata(transaction)) + transaction.record_custom_event("LlmTool", error_tool_event_dict) + + +def wrap_on_tool_start_sync(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + tool_id = _get_tool_id(instance) + run_manager = wrapped(*args, **kwargs) + _capture_tool_run_id(transaction, run_manager, tool_id) + return run_manager + + +async def wrap_on_tool_start_async(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + tool_id = _get_tool_id(instance) + run_manager = await wrapped(*args, **kwargs) + _capture_tool_run_id(transaction, run_manager, tool_id) + return run_manager + + +def _get_tool_id(instance): + return (getattr(instance, "metadata", None) or {}).pop("nr_tool_id", None) + + +def _capture_tool_run_id(transaction, run_manager, tool_id): + if tool_id: + if not hasattr(transaction, "_nr_tool_run_ids"): + transaction._nr_tool_run_ids = {} + if tool_id not in transaction._nr_tool_run_ids: + transaction._nr_tool_run_ids[tool_id] = getattr(run_manager, "run_id", None) + + +async def wrap_chain_async_run(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Langchain", LANGCHAIN_VERSION) + transaction._add_agent_attribute("llm", True) + + run_args = bind_args(wrapped, args, kwargs) + completion_id = str(uuid.uuid4()) + add_nr_completion_id(run_args, completion_id) + # Check to see if launched from agent or directly from chain. + # The trace group will reflect from where it has started. 
+ # The AgentExecutor class has an attribute "agent" that does + # not exist within the Chain class + group_name = "Llm/agent/Langchain" if hasattr(instance, "agent") else "Llm/chain/Langchain" + ft = FunctionTrace(name=wrapped.__name__, group=group_name) + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + response = await wrapped(input=run_args["input"], config=run_args["config"], **run_args.get("kwargs", {})) + except Exception as exc: + ft.notice_error( + attributes={ + "completion_id": completion_id, + } + ) + ft.__exit__(*sys.exc_info()) + _create_error_chain_run_events(transaction, instance, run_args, completion_id, linking_metadata, ft.duration) + raise + ft.__exit__(None, None, None) + + if not response: + return response + + _create_successful_chain_run_events( + transaction, instance, run_args, completion_id, response, linking_metadata, ft.duration + ) + return response + + +def wrap_chain_sync_run(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Langchain", LANGCHAIN_VERSION) + transaction._add_agent_attribute("llm", True) + + run_args = bind_args(wrapped, args, kwargs) + completion_id = str(uuid.uuid4()) + add_nr_completion_id(run_args, completion_id) + # Check to see if launched from agent or directly from chain. + # The trace group will reflect from where it has started. + # The AgentExecutor class has an attribute "agent" that does + # not exist within the Chain class + group_name = "Llm/agent/Langchain" if hasattr(instance, "agent") else "Llm/chain/Langchain" + ft = FunctionTrace(name=wrapped.__name__, group=group_name) + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + response = wrapped(input=run_args["input"], config=run_args["config"], **run_args.get("kwargs", {})) + except Exception as exc: + ft.notice_error( + attributes={ + "completion_id": completion_id, + } + ) + ft.__exit__(*sys.exc_info()) + _create_error_chain_run_events(transaction, instance, run_args, completion_id, linking_metadata, ft.duration) + raise + ft.__exit__(None, None, None) + + if not response: + return response + + _create_successful_chain_run_events( + transaction, instance, run_args, completion_id, response, linking_metadata, ft.duration + ) + return response + + +def add_nr_completion_id(run_args, completion_id): + # invoke has an argument named "config" that contains metadata and tags. + # Add the nr_completion_id into the metadata to be used as the function call + # identifier when grabbing the run_id off the transaction.
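+ # (nr_completion_id is popped back off by the callback-manager hooks via
+ # _get_completion_id and filtered out of reported metadata in
+ # _get_run_manager_info, so it never appears on recorded events.)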
+ metadata = (run_args.get("config") or {}).get("metadata") or {} + metadata["nr_completion_id"] = completion_id + if not run_args.get("config"): + run_args["config"] = {"metadata": metadata} + else: + run_args["config"]["metadata"] = metadata + + +def _create_error_chain_run_events(transaction, instance, run_args, completion_id, linking_metadata, duration): + _input = run_args.get("input") + llm_metadata_dict = _get_llm_metadata(transaction) + run_id, metadata, tags = _get_run_manager_info(transaction, run_args, instance, completion_id) + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + input_message_list = [_input] + + # Make sure the builtin attributes take precedence over metadata attributes. + full_chat_completion_summary_dict = {"metadata.%s" % key: value for key, value in metadata.items()} + full_chat_completion_summary_dict.update( + { + "id": completion_id, + "span_id": span_id, + "trace_id": trace_id, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": run_id, + "duration": duration, + "response.number_of_messages": len(input_message_list), + "tags": tags, + "error": True, + } + ) + full_chat_completion_summary_dict.update(llm_metadata_dict) + transaction.record_custom_event("LlmChatCompletionSummary", full_chat_completion_summary_dict) + create_chat_completion_message_event( + transaction, + input_message_list, + completion_id, + span_id, + trace_id, + run_id, + llm_metadata_dict, + [], + ) + + +def _get_run_manager_info(transaction, run_args, instance, completion_id): + run_id = getattr(transaction, "_nr_chain_run_ids", {}).pop(completion_id, "") + # metadata and tags are keys in the config parameter. + metadata = {} + metadata.update((run_args.get("config") or {}).get("metadata") or {}) + # Do not report internal nr_completion_id in metadata. + metadata = {key: value for key, value in metadata.items() if key != "nr_completion_id"} + tags = [] + tags.extend((run_args.get("config") or {}).get("tags") or []) + return run_id, metadata, tags or None + + +def _get_llm_metadata(transaction): + # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events + custom_attrs_dict = transaction._custom_params + llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + return llm_metadata_dict + + +def _create_successful_chain_run_events( + transaction, instance, run_args, completion_id, response, linking_metadata, duration +): + _input = run_args.get("input") + llm_metadata_dict = _get_llm_metadata(transaction) + run_id, metadata, tags = _get_run_manager_info(transaction, run_args, instance, completion_id) + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + input_message_list = [_input] + output_message_list = [] + try: + output_message_list = [response[0]] if response else [] + except Exception: + try: + output_message_list = [str(response)] + except Exception: + _logger.warning( + "Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n%s" + % traceback.format_exception(*sys.exc_info()) + ) + + # Make sure the builtin attributes take precedence over metadata attributes.
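+ # (virtual_llm=True marks these as framework-level completion events; the
+ # underlying provider instrumentation, e.g. the openai or bedrock hooks,
+ # records its own LlmChatCompletionSummary for the actual model call.)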
+ full_chat_completion_summary_dict = {"metadata.%s" % key: value for key, value in metadata.items()} + full_chat_completion_summary_dict.update( + { + "id": completion_id, + "span_id": span_id, + "trace_id": trace_id, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": run_id, + "duration": duration, + "response.number_of_messages": len(input_message_list) + len(output_message_list), + "tags": tags, + } + ) + full_chat_completion_summary_dict.update(llm_metadata_dict) + transaction.record_custom_event("LlmChatCompletionSummary", full_chat_completion_summary_dict) + create_chat_completion_message_event( + transaction, + input_message_list, + completion_id, + span_id, + trace_id, + run_id, + llm_metadata_dict, + output_message_list, + ) + + +def create_chat_completion_message_event( + transaction, + input_message_list, + chat_completion_id, + span_id, + trace_id, + run_id, + llm_metadata_dict, + output_message_list, +): + settings = transaction.settings if transaction.settings is not None else global_settings() + + # Loop through all input messages received from the create request and emit a custom event for each one + for index, message in enumerate(input_message_list): + chat_completion_input_message_dict = { + "id": str(uuid.uuid4()), + "request_id": run_id, + "span_id": span_id, + "trace_id": trace_id, + "completion_id": chat_completion_id, + "sequence": index, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + } + if settings.ai_monitoring.record_content.enabled: + chat_completion_input_message_dict["content"] = message + chat_completion_input_message_dict.update(llm_metadata_dict) + transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_input_message_dict) + + if output_message_list: + # Loop through all output messages received from the LLM response and emit a custom event for each one + for index, message in enumerate(output_message_list): + # Add offset of input_message_length so we don't receive any duplicate index values that match the input message IDs + index += len(input_message_list) + + chat_completion_output_message_dict = { + "id": str(uuid.uuid4()), + "request_id": run_id, + "span_id": span_id, + "trace_id": trace_id, + "completion_id": chat_completion_id, + "sequence": index, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + } + if settings.ai_monitoring.record_content.enabled: + chat_completion_output_message_dict["content"] = message + chat_completion_output_message_dict.update(llm_metadata_dict) + transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_output_message_dict) + + +def wrap_on_chain_start(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + completion_id = _get_completion_id(instance) + run_manager = wrapped(*args, **kwargs) + _capture_chain_run_id(transaction, run_manager, completion_id) + return run_manager + + +async def wrap_async_on_chain_start(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, 
**kwargs) + + completion_id = _get_completion_id(instance) + run_manager = await wrapped(*args, **kwargs) + _capture_chain_run_id(transaction, run_manager, completion_id) + return run_manager + + +def _get_completion_id(instance): + return (getattr(instance, "metadata", None) or {}).pop("nr_completion_id", None) + + +def _capture_chain_run_id(transaction, run_manager, completion_id): + if completion_id: + if not hasattr(transaction, "_nr_chain_run_ids"): + transaction._nr_chain_run_ids = {} + # Only capture the first run_id. + if completion_id not in transaction._nr_chain_run_ids: + transaction._nr_chain_run_ids[completion_id] = getattr(run_manager, "run_id", "") + + +def instrument_langchain_runnables_chains_base(module): + if hasattr(getattr(module, "RunnableSequence"), "invoke"): + wrap_function_wrapper(module, "RunnableSequence.invoke", wrap_chain_sync_run) + if hasattr(getattr(module, "RunnableSequence"), "ainvoke"): + wrap_function_wrapper(module, "RunnableSequence.ainvoke", wrap_chain_async_run) + + +def instrument_langchain_chains_base(module): + if hasattr(getattr(module, "Chain"), "invoke"): + wrap_function_wrapper(module, "Chain.invoke", wrap_chain_sync_run) + if hasattr(getattr(module, "Chain"), "ainvoke"): + wrap_function_wrapper(module, "Chain.ainvoke", wrap_chain_async_run) + + +def instrument_langchain_vectorstore_similarity_search(module): + vector_class = VECTORSTORE_CLASSES.get(module.__name__) + + if vector_class and hasattr(getattr(module, vector_class, ""), "similarity_search"): + wrap_function_wrapper(module, "%s.similarity_search" % vector_class, wrap_similarity_search) + if vector_class and hasattr(getattr(module, vector_class, ""), "asimilarity_search"): + wrap_function_wrapper(module, "%s.asimilarity_search" % vector_class, wrap_asimilarity_search) + + +def instrument_langchain_core_tools(module): + if hasattr(getattr(module, "BaseTool"), "run"): + wrap_function_wrapper(module, "BaseTool.run", wrap_tool_sync_run) + if hasattr(getattr(module, "BaseTool"), "arun"): + wrap_function_wrapper(module, "BaseTool.arun", wrap_tool_async_run) + + +def instrument_langchain_callbacks_manager(module): + if hasattr(getattr(module, "CallbackManager"), "on_tool_start"): + wrap_function_wrapper(module, "CallbackManager.on_tool_start", wrap_on_tool_start_sync) + if hasattr(getattr(module, "AsyncCallbackManager"), "on_tool_start"): + wrap_function_wrapper(module, "AsyncCallbackManager.on_tool_start", wrap_on_tool_start_async) + if hasattr(getattr(module, "CallbackManager"), "on_chain_start"): + wrap_function_wrapper(module, "CallbackManager.on_chain_start", wrap_on_chain_start) + if hasattr(getattr(module, "AsyncCallbackManager"), "on_chain_start"): + wrap_function_wrapper(module, "AsyncCallbackManager.on_chain_start", wrap_async_on_chain_start) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py new file mode 100644 index 000000000..e101b21be --- /dev/null +++ b/newrelic/hooks/mlmodel_openai.py @@ -0,0 +1,975 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sys +import traceback +import uuid + +import openai + +from newrelic.api.function_trace import FunctionTrace +from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.api.transaction import current_transaction +from newrelic.common.object_wrapper import ObjectProxy, wrap_function_wrapper +from newrelic.common.package_version_utils import get_package_version +from newrelic.common.signature import bind_args +from newrelic.core.config import global_settings + +OPENAI_VERSION = get_package_version("openai") +OPENAI_VERSION_TUPLE = tuple(map(int, OPENAI_VERSION.split("."))) +OPENAI_V1 = OPENAI_VERSION_TUPLE >= (1,) +EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE = "Exception occurred in openai instrumentation: While reporting an exception in openai, another exception occurred. Report this issue to New Relic Support.\n%s" +RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in OpenAI instrumentation: Failed to record LLM events. Please report this issue to New Relic Support.\n%s" +STREAM_PARSING_FAILURE_LOG_MESSAGE = "Exception occurred in OpenAI instrumentation: Failed to process event stream information. Please report this issue to New Relic Support.\n%s" + +_logger = logging.getLogger(__name__) + + +def wrap_embedding_sync(wrapped, instance, args, kwargs): + transaction = current_transaction() + if ( + not transaction + or kwargs.get("stream", False) + or (kwargs.get("extra_headers") or {}).get("X-Stainless-Raw-Response") == "stream" + ): + return wrapped(*args, **kwargs) + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("OpenAI", OPENAI_VERSION) + transaction._add_agent_attribute("llm", True) + + # Obtain attributes to be stored on embedding events regardless of whether we hit an error + embedding_id = str(uuid.uuid4()) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/embedding/OpenAI") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + response = wrapped(*args, **kwargs) + except Exception as exc: + _record_embedding_error(transaction, embedding_id, linking_metadata, kwargs, ft, exc) + raise + ft.__exit__(None, None, None) + + if not response: + return response + + _record_embedding_success(transaction, embedding_id, linking_metadata, kwargs, ft, response) + return response + + +def wrap_chat_completion_sync(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("OpenAI", OPENAI_VERSION) + transaction._add_agent_attribute("llm", True) + + completion_id = str(uuid.uuid4()) + request_message_list = kwargs.get("messages", []) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/completion/OpenAI") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + return_val = wrapped(*args, **kwargs) + except Exception as exc: + _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc) + raise + _handle_completion_success(transaction, 
linking_metadata, completion_id, kwargs, ft, return_val) + return return_val + + +def check_rate_limit_header(response_headers, header_name, is_int): + if not response_headers: + return None + + if header_name in response_headers: + header_value = response_headers.get(header_name) + if is_int: + try: + header_value = int(header_value) + except Exception: + pass + return header_value + else: + return None + + +def create_chat_completion_message_event( + transaction, + input_message_list, + chat_completion_id, + span_id, + trace_id, + response_model, + request_model, + response_id, + request_id, + llm_metadata, + output_message_list, +): + settings = transaction.settings if transaction.settings is not None else global_settings() + + # Loop through all input messages received from the create request and emit a custom event for each one + for index, message in enumerate(input_message_list): + message_content = message.get("content") + + # Response ID was set, append message index to it. + if response_id: + message_id = "%s-%d" % (response_id, index) + # No response IDs, use random UUID + else: + message_id = str(uuid.uuid4()) + + chat_completion_input_message_dict = { + "id": message_id, + "request_id": request_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(request_model, message_content) + if settings.ai_monitoring.llm_token_count_callback + else None, + "role": message.get("role"), + "completion_id": chat_completion_id, + "sequence": index, + "response.model": response_model, + "vendor": "openai", + "ingest_source": "Python", + } + + if settings.ai_monitoring.record_content.enabled: + chat_completion_input_message_dict["content"] = message_content + + chat_completion_input_message_dict.update(llm_metadata) + + transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_input_message_dict) + + if output_message_list: + # Loop through all output messages received from the LLM response and emit a custom event for each one + for index, message in enumerate(output_message_list): + message_content = message.get("content") + + # Add offset of input_message_length so we don't receive any duplicate index values that match the input message IDs + index += len(input_message_list) + + # Response ID was set, append message index to it. 
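+ # (Illustrative) a response id of "chatcmpl-abc" with two input messages yields + # output message ids "chatcmpl-abc-2" and "chatcmpl-abc-3" via the offset above.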
+ if response_id: + message_id = "%s-%d" % (response_id, index) + # No response IDs, use random UUID + else: + message_id = str(uuid.uuid4()) + + chat_completion_output_message_dict = { + "id": message_id, + "request_id": request_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(response_model, message_content) + if settings.ai_monitoring.llm_token_count_callback + else None, + "role": message.get("role"), + "completion_id": chat_completion_id, + "sequence": index, + "response.model": response_model, + "vendor": "openai", + "ingest_source": "Python", + "is_response": True, + } + + if settings.ai_monitoring.record_content.enabled: + chat_completion_output_message_dict["content"] = message_content + + chat_completion_output_message_dict.update(llm_metadata) + + transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_output_message_dict) + + +async def wrap_embedding_async(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction or kwargs.get("stream", False): + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("OpenAI", OPENAI_VERSION) + transaction._add_agent_attribute("llm", True) + + # Obtain attributes to be stored on embedding events regardless of whether we hit an error + embedding_id = str(uuid.uuid4()) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/embedding/OpenAI") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + response = await wrapped(*args, **kwargs) + except Exception as exc: + _record_embedding_error(transaction, embedding_id, linking_metadata, kwargs, ft, exc) + raise + ft.__exit__(None, None, None) + + if not response: + return response + + _record_embedding_success(transaction, embedding_id, linking_metadata, kwargs, ft, response) + return response + + +def _record_embedding_success(transaction, embedding_id, linking_metadata, kwargs, ft, response): + settings = transaction.settings if transaction.settings is not None else global_settings() + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + try: + response_headers = getattr(response, "_nr_response_headers", {}) + input = kwargs.get("input") + + # In v1, response objects are pydantic models so this function call converts the + # object back to a dictionary for backwards compatibility. 
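+ # (Illustrative, assuming openai>=1) the response is a pydantic model such as + # CreateEmbeddingResponse; model_dump() yields a plain dict like + # {"model": "text-embedding-ada-002", "usage": {...}, ...}.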
+ attribute_response = response + if OPENAI_V1: + attribute_response = response.model_dump() + + request_id = response_headers.get("x-request-id") + response_model = attribute_response.get("model") + response_usage = attribute_response.get("usage", {}) or {} + organization = ( + response_headers.get("openai-organization") + if OPENAI_V1 + else getattr(attribute_response, "organization", None) + ) + + full_embedding_response_dict = { + "id": embedding_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(response_model, input) + if settings.ai_monitoring.llm_token_count_callback + else None, + "request.model": kwargs.get("model") or kwargs.get("engine"), + "request_id": request_id, + "duration": ft.duration, + "response.model": response_model, + "response.organization": organization, + "response.headers.llmVersion": response_headers.get("openai-version"), + "response.headers.ratelimitLimitRequests": check_rate_limit_header( + response_headers, "x-ratelimit-limit-requests", True + ), + "response.headers.ratelimitLimitTokens": check_rate_limit_header( + response_headers, "x-ratelimit-limit-tokens", True + ), + "response.headers.ratelimitResetTokens": check_rate_limit_header( + response_headers, "x-ratelimit-reset-tokens", False + ), + "response.headers.ratelimitResetRequests": check_rate_limit_header( + response_headers, "x-ratelimit-reset-requests", False + ), + "response.headers.ratelimitRemainingTokens": check_rate_limit_header( + response_headers, "x-ratelimit-remaining-tokens", True + ), + "response.headers.ratelimitRemainingRequests": check_rate_limit_header( + response_headers, "x-ratelimit-remaining-requests", True + ), + "vendor": "openai", + "ingest_source": "Python", + } + if settings.ai_monitoring.record_content.enabled: + full_embedding_response_dict["input"] = input + full_embedding_response_dict.update(_get_llm_attributes(transaction)) + transaction.record_custom_event("LlmEmbedding", full_embedding_response_dict) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def _record_embedding_error(transaction, embedding_id, linking_metadata, kwargs, ft, exc): + settings = transaction.settings if transaction.settings is not None else global_settings() + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + model = kwargs.get("model") or kwargs.get("engine") + input = kwargs.get("input") + + exc_organization = None + notice_error_attributes = {} + try: + if OPENAI_V1: + response = getattr(exc, "response", None) + response_headers = getattr(response, "headers", None) or {} + exc_organization = response_headers.get("openai-organization") + # There appears to be a bug here in openai v1 where despite having code, + # param, etc in the error response, they are not populated on the exception + # object so grab them from the response body object instead. 
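+ # (Illustrative) exc.body for an auth failure might look like + # {"message": "Incorrect API key provided", "code": "invalid_api_key", "param": None}; + # the attributes below are read from that dict.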
+ body = getattr(exc, "body", None) or {} + notice_error_attributes = { + "http.statusCode": getattr(exc, "status_code", None), + "error.message": body.get("message"), + "error.code": body.get("code"), + "error.param": body.get("param"), + "embedding_id": embedding_id, + } + else: + exc_organization = getattr(exc, "organization", None) + notice_error_attributes = { + "http.statusCode": getattr(exc, "http_status", None), + "error.message": getattr(exc, "_message", None), + "error.code": getattr(getattr(exc, "error", None), "code", None), + "error.param": getattr(exc, "param", None), + "embedding_id": embedding_id, + } + except Exception: + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + # Use a default so a failure while collecting attributes above cannot raise a KeyError. + message = notice_error_attributes.pop("error.message", None) + if message: + exc._nr_message = message + ft.notice_error( + attributes=notice_error_attributes, + ) + # Exit the trace now so that the duration is calculated. + ft.__exit__(*sys.exc_info()) + + try: + error_embedding_dict = { + "id": embedding_id, + "span_id": span_id, + "trace_id": trace_id, + "token_count": settings.ai_monitoring.llm_token_count_callback(model, input) + if settings.ai_monitoring.llm_token_count_callback + else None, + "request.model": model, + "vendor": "openai", + "ingest_source": "Python", + "response.organization": exc_organization, + "duration": ft.duration, + "error": True, + } + if settings.ai_monitoring.record_content.enabled: + error_embedding_dict["input"] = input + error_embedding_dict.update(_get_llm_attributes(transaction)) + transaction.record_custom_event("LlmEmbedding", error_embedding_dict) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +async def wrap_chat_completion_async(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("OpenAI", OPENAI_VERSION) + transaction._add_agent_attribute("llm", True) + + completion_id = str(uuid.uuid4()) + + ft = FunctionTrace(name=wrapped.__name__, group="Llm/completion/OpenAI") + ft.__enter__() + linking_metadata = get_trace_linking_metadata() + try: + return_val = await wrapped(*args, **kwargs) + except Exception as exc: + _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc) + raise + + _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val) + return return_val + + +def _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val): + settings = transaction.settings if transaction.settings is not None else global_settings() + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + request_message_list = kwargs.get("messages") or [] + stream = kwargs.get("stream", False) + # Exit the function trace now unless this is a streaming response with streaming + # monitoring enabled and a non-empty return value; in that case the trace is + # exited when the stream is exhausted. + if not stream or not settings.ai_monitoring.streaming.enabled or not return_val: + ft.__exit__(None, None, None) + + # If the return value is empty or streaming monitoring is disabled, exit early.
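+ # (Illustrative) stream=True with ai_monitoring.streaming.enabled=False lands here: + # the trace above was already exited and no LLM events are recorded.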
+ if not return_val or (stream and not settings.ai_monitoring.streaming.enabled): + return + if stream: + try: + # The function trace will be exited when in the final iteration of the response + # generator. + setattr(return_val, "_nr_ft", ft) + setattr(return_val, "_nr_openai_attrs", getattr(return_val, "_nr_openai_attrs", {})) + return_val._nr_openai_attrs["messages"] = kwargs.get("messages", []) + return_val._nr_openai_attrs["temperature"] = kwargs.get("temperature") + return_val._nr_openai_attrs["max_tokens"] = kwargs.get("max_tokens") + return_val._nr_openai_attrs["model"] = kwargs.get("model") or kwargs.get("engine") + return + except Exception: + _logger.warning(STREAM_PARSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + try: + # If response is not a stream generator, record the event data. + # At this point, we have a response so we can grab attributes only available on the response object + response_headers = getattr(return_val, "_nr_response_headers", {}) + # In v1, response objects are pydantic models so this function call converts the + # object back to a dictionary for backwards compatibility. + response = return_val + if OPENAI_V1: + response = response.model_dump() + + _record_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def _record_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response): + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + try: + if response: + response_model = response.get("model") + response_id = response.get("id") + response_usage = response.get("usage") or {} + output_message_list = [] + finish_reason = None + choices = response.get("choices") or [] + if choices: + output_message_list = [choices[0].get("message")] + finish_reason = choices[0].get("finish_reason") + else: + response_model = kwargs.get("response.model") + response_id = kwargs.get("id") + response_usage = {} + output_message_list = [] + finish_reason = None + if "content" in kwargs: + output_message_list = [{"content": kwargs.get("content"), "role": kwargs.get("role")}] + finish_reason = kwargs.get("finish_reason") + request_model = kwargs.get("model") or kwargs.get("engine") + + request_id = response_headers.get("x-request-id") + organization = response_headers.get("openai-organization") or getattr(response, "organization", None) + messages = kwargs.get("messages", None) or [] + input_message_list = list(messages) + full_chat_completion_summary_dict = { + "id": completion_id, + "span_id": span_id, + "trace_id": trace_id, + "request.model": request_model, + "request.temperature": kwargs.get("temperature"), + "request.max_tokens": kwargs.get("max_tokens"), + "vendor": "openai", + "ingest_source": "Python", + "request_id": request_id, + "duration": ft.duration, + "response.model": response_model, + "response.organization": organization, + "response.choices.finish_reason": finish_reason, + "response.headers.llmVersion": response_headers.get("openai-version"), + "response.headers.ratelimitLimitRequests": check_rate_limit_header( + response_headers, "x-ratelimit-limit-requests", True + ), + "response.headers.ratelimitLimitTokens": check_rate_limit_header( + response_headers, "x-ratelimit-limit-tokens", True + ), + "response.headers.ratelimitResetTokens": check_rate_limit_header( + response_headers, 
"x-ratelimit-reset-tokens", False + ), + "response.headers.ratelimitResetRequests": check_rate_limit_header( + response_headers, "x-ratelimit-reset-requests", False + ), + "response.headers.ratelimitRemainingTokens": check_rate_limit_header( + response_headers, "x-ratelimit-remaining-tokens", True + ), + "response.headers.ratelimitRemainingRequests": check_rate_limit_header( + response_headers, "x-ratelimit-remaining-requests", True + ), + "response.headers.ratelimitLimitTokensUsageBased": check_rate_limit_header( + response_headers, "x-ratelimit-limit-tokens_usage_based", True + ), + "response.headers.ratelimitResetTokensUsageBased": check_rate_limit_header( + response_headers, "x-ratelimit-reset-tokens_usage_based", False + ), + "response.headers.ratelimitRemainingTokensUsageBased": check_rate_limit_header( + response_headers, "x-ratelimit-remaining-tokens_usage_based", True + ), + "response.number_of_messages": len(input_message_list) + len(output_message_list), + } + llm_metadata = _get_llm_attributes(transaction) + full_chat_completion_summary_dict.update(llm_metadata) + transaction.record_custom_event("LlmChatCompletionSummary", full_chat_completion_summary_dict) + + create_chat_completion_message_event( + transaction, + input_message_list, + completion_id, + span_id, + trace_id, + response_model, + request_model, + response_id, + request_id, + llm_metadata, + output_message_list, + ) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc): + span_id = linking_metadata.get("span.id") + trace_id = linking_metadata.get("trace.id") + request_message_list = kwargs.get("messages", None) or [] + notice_error_attributes = {} + try: + if OPENAI_V1: + response = getattr(exc, "response", None) + response_headers = getattr(response, "headers", None) or {} + exc_organization = response_headers.get("openai-organization") + # There appears to be a bug here in openai v1 where despite having code, + # param, etc in the error response, they are not populated on the exception + # object so grab them from the response body object instead. + body = getattr(exc, "body", None) or {} + notice_error_attributes = { + "http.statusCode": getattr(exc, "status_code", None), + "error.message": body.get("message"), + "error.code": body.get("code"), + "error.param": body.get("param"), + "completion_id": completion_id, + } + else: + exc_organization = getattr(exc, "organization", None) + notice_error_attributes = { + "http.statusCode": getattr(exc, "http_status", None), + "error.message": getattr(exc, "_message", None), + "error.code": getattr(getattr(exc, "error", None), "code", None), + "error.param": getattr(exc, "param", None), + "completion_id": completion_id, + } + except Exception: + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + # Override the default message if it is not empty. + message = notice_error_attributes.pop("error.message") + if message: + exc._nr_message = message + + ft.notice_error( + attributes=notice_error_attributes, + ) + # Stop the span now so we compute the duration before we create the events. + ft.__exit__(*sys.exc_info()) + + try: + # In a rare case where we are streaming the response and we do get back a request + # and response id, even though an error was encountered, record them. 
+ response_headers = kwargs.get("response_headers") or {} + request_id = response_headers.get("x-request-id") + response_id = kwargs.get("id") + request_model = kwargs.get("model") or kwargs.get("engine") + error_chat_completion_dict = { + "id": completion_id, + "span_id": span_id, + "trace_id": trace_id, + "response.number_of_messages": len(request_message_list), + "request.model": request_model, + "request.temperature": kwargs.get("temperature"), + "request.max_tokens": kwargs.get("max_tokens"), + "vendor": "openai", + "ingest_source": "Python", + "response.organization": exc_organization, + "duration": ft.duration, + "error": True, + } + llm_metadata = _get_llm_attributes(transaction) + error_chat_completion_dict.update(llm_metadata) + transaction.record_custom_event("LlmChatCompletionSummary", error_chat_completion_dict) + + output_message_list = [] + if "content" in kwargs: + output_message_list = [{"content": kwargs.get("content"), "role": kwargs.get("role")}] + create_chat_completion_message_event( + transaction, + request_message_list, + completion_id, + span_id, + trace_id, + kwargs.get("response.model"), + request_model, + response_id, + request_id, + llm_metadata, + output_message_list, + ) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def wrap_convert_to_openai_object(wrapped, instance, args, kwargs): + """Obtain response headers for v0.""" + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + resp = args[0] + returned_response = wrapped(*args, **kwargs) + + if isinstance(returned_response, openai.openai_object.OpenAIObject) and isinstance( + resp, openai.openai_response.OpenAIResponse + ): + setattr(returned_response, "_nr_response_headers", getattr(resp, "_headers", {})) + + return returned_response + + +def bind_base_client_process_response( + cast_to, + options, + response, + stream, + stream_cls, +): + nr_response_headers = getattr(response, "headers", None) or {} + return nr_response_headers + + +def wrap_base_client_process_response_sync(wrapped, instance, args, kwargs): + """Obtain response headers for v1.""" + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + nr_response_headers = bind_base_client_process_response(*args, **kwargs) + return_val = wrapped(*args, **kwargs) + return_val._nr_response_headers = nr_response_headers + return return_val + + +async def wrap_base_client_process_response_async(wrapped, instance, args, kwargs): + """Obtain response headers for v1.""" + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + nr_response_headers = bind_base_client_process_response(*args, **kwargs) + return_val = await wrapped(*args, **kwargs) + return_val._nr_response_headers = nr_response_headers + return return_val + + +class GeneratorProxy(ObjectProxy): + def __init__(self, wrapped): + super(GeneratorProxy, self).__init__(wrapped) + + def
__iter__(self): + return self + + def __next__(self): + transaction = current_transaction() + if not transaction: + return self.__wrapped__.__next__() + + return_val = None + try: + return_val = self.__wrapped__.__next__() + _record_stream_chunk(self, return_val) + except StopIteration as e: + _record_events_on_stop_iteration(self, transaction) + raise + except Exception as exc: + _handle_streaming_completion_error(self, transaction, exc) + raise + return return_val + + def close(self): + return super(GeneratorProxy, self).close() + + +def _record_stream_chunk(self, return_val): + if return_val: + try: + if OPENAI_V1: + if getattr(return_val, "data", "").startswith("[DONE]"): + return + return_val = return_val.json() + self._nr_openai_attrs["response_headers"] = getattr(self, "_nr_response_headers", {}) + else: + self._nr_openai_attrs["response_headers"] = getattr(return_val, "_nr_response_headers", {}) + choices = return_val.get("choices") or [] + self._nr_openai_attrs["response.model"] = return_val.get("model") + self._nr_openai_attrs["id"] = return_val.get("id") + self._nr_openai_attrs["response.organization"] = return_val.get("organization") + if choices: + delta = choices[0].get("delta") or {} + if delta: + self._nr_openai_attrs["content"] = self._nr_openai_attrs.get("content", "") + ( + delta.get("content") or "" + ) + self._nr_openai_attrs["role"] = self._nr_openai_attrs.get("role") or delta.get("role") + self._nr_openai_attrs["finish_reason"] = choices[0].get("finish_reason") + except Exception: + _logger.warning(STREAM_PARSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + + +def _record_events_on_stop_iteration(self, transaction): + if hasattr(self, "_nr_ft"): + linking_metadata = get_trace_linking_metadata() + self._nr_ft.__exit__(None, None, None) + try: + openai_attrs = getattr(self, "_nr_openai_attrs", {}) + + # If there are no openai attrs exit early as there's no data to record. + if not openai_attrs: + return + + completion_id = str(uuid.uuid4()) + response_headers = openai_attrs.get("response_headers") or {} + _record_completion_success( + transaction, linking_metadata, completion_id, openai_attrs, self._nr_ft, response_headers, None + ) + except Exception: + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + finally: + # Clear cached data as this can be very large. + # Note this is also important for not reporting the events twice. In openai v1 + # there are two loops around the iterator, the second is meant to clear the + # stream since there is a condition where the iterator may exit before all the + # stream contents is read. This results in StopIteration being raised twice + # instead of once at the end of the loop. + if hasattr(self, "_nr_openai_attrs"): + self._nr_openai_attrs.clear() + + +def _handle_streaming_completion_error(self, transaction, exc): + if hasattr(self, "_nr_ft"): + openai_attrs = getattr(self, "_nr_openai_attrs", {}) + + # If there are no openai attrs exit early as there's no data to record. 
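+ # (Editorial note) the attrs may already have been cleared by + # _record_events_on_stop_iteration, which empties them after recording so a + # second drain of the stream in openai v1 does not double-report events.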
+ if not openai_attrs: + self._nr_ft.__exit__(*sys.exc_info()) + return + linking_metadata = get_trace_linking_metadata() + completion_id = str(uuid.uuid4()) + _record_completion_error(transaction, linking_metadata, completion_id, openai_attrs, self._nr_ft, exc) + + +class AsyncGeneratorProxy(ObjectProxy): + def __init__(self, wrapped): + super(AsyncGeneratorProxy, self).__init__(wrapped) + + def __aiter__(self): + self._nr_wrapped_iter = self.__wrapped__.__aiter__() + return self + + async def __anext__(self): + transaction = current_transaction() + if not transaction: + return await self._nr_wrapped_iter.__anext__() + + return_val = None + try: + return_val = await self._nr_wrapped_iter.__anext__() + _record_stream_chunk(self, return_val) + except StopAsyncIteration as e: + _record_events_on_stop_iteration(self, transaction) + raise + except Exception as exc: + _handle_streaming_completion_error(self, transaction, exc) + raise + return return_val + + async def aclose(self): + return await super(AsyncGeneratorProxy, self).aclose() + + +def wrap_stream_iter_events_sync(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled or not settings.ai_monitoring.streaming.enabled: + return wrapped(*args, **kwargs) + + return_val = wrapped(*args, **kwargs) + proxied_return_val = GeneratorProxy(return_val) + set_attrs_on_generator_proxy(proxied_return_val, instance) + return proxied_return_val + + +def wrap_stream_iter_events_async(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled or not settings.ai_monitoring.streaming.enabled: + return wrapped(*args, **kwargs) + + return_val = wrapped(*args, **kwargs) + proxied_return_val = AsyncGeneratorProxy(return_val) + set_attrs_on_generator_proxy(proxied_return_val, instance) + return proxied_return_val + + +def set_attrs_on_generator_proxy(proxy, instance): + """Pass the nr attributes to the generator proxy.""" + if hasattr(instance, "_nr_ft"): + proxy._nr_ft = instance._nr_ft + if hasattr(instance, "_nr_response_headers"): + proxy._nr_response_headers = instance._nr_response_headers + if hasattr(instance, "_nr_openai_attrs"): + proxy._nr_openai_attrs = instance._nr_openai_attrs + + +def wrap_engine_api_resource_create_sync(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + stream = is_stream(wrapped, args, kwargs) + return_val = wrapped(*args, **kwargs) + if stream and settings.ai_monitoring.streaming.enabled: + return GeneratorProxy(return_val) + else: + return return_val + + +async def wrap_engine_api_resource_create_async(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + stream = is_stream(wrapped, args, kwargs) + return_val = await 
wrapped(*args, **kwargs) + if stream and settings.ai_monitoring.streaming.enabled: + return AsyncGeneratorProxy(return_val) + else: + return return_val + + +def is_stream(wrapped, args, kwargs): + bound_args = bind_args(wrapped, args, kwargs) + return bound_args["params"].get("stream", False) + + +def _get_llm_attributes(transaction): + """Returns llm.* custom attributes off of the transaction.""" + custom_attrs_dict = transaction._custom_params + llm_metadata = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + return llm_metadata + + +def instrument_openai_api_resources_embedding(module): + if hasattr(module, "Embedding"): + if hasattr(module.Embedding, "create"): + wrap_function_wrapper(module, "Embedding.create", wrap_embedding_sync) + if hasattr(module.Embedding, "acreate"): + wrap_function_wrapper(module, "Embedding.acreate", wrap_embedding_async) + # This is to mark where we instrument so the SDK knows not to instrument them + # again. + setattr(module.Embedding, "_nr_wrapped", True) + + +def instrument_openai_api_resources_chat_completion(module): + if hasattr(module, "ChatCompletion"): + if hasattr(module.ChatCompletion, "create"): + wrap_function_wrapper(module, "ChatCompletion.create", wrap_chat_completion_sync) + if hasattr(module.ChatCompletion, "acreate"): + wrap_function_wrapper(module, "ChatCompletion.acreate", wrap_chat_completion_async) + # This is to mark where we instrument so the SDK knows not to instrument them + # again. + setattr(module.ChatCompletion, "_nr_wrapped", True) + + +def instrument_openai_resources_chat_completions(module): + if hasattr(module.Completions, "create"): + wrap_function_wrapper(module, "Completions.create", wrap_chat_completion_sync) + if hasattr(module.AsyncCompletions, "create"): + wrap_function_wrapper(module, "AsyncCompletions.create", wrap_chat_completion_async) + + +def instrument_openai_resources_embeddings(module): + if hasattr(module, "Embeddings"): + if hasattr(module.Embeddings, "create"): + wrap_function_wrapper(module, "Embeddings.create", wrap_embedding_sync) + + if hasattr(module, "AsyncEmbeddings"): + if hasattr(module.AsyncEmbeddings, "create"): + wrap_function_wrapper(module, "AsyncEmbeddings.create", wrap_embedding_async) + + +def instrument_openai_util(module): + if hasattr(module, "convert_to_openai_object"): + wrap_function_wrapper(module, "convert_to_openai_object", wrap_convert_to_openai_object) + # This is to mark where we instrument so the SDK knows not to instrument them + # again.
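+ # (Editorial note) other integrations can check the _nr_wrapped marker set + # below before applying their own wrappers to convert_to_openai_object.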
+ setattr(module.convert_to_openai_object, "_nr_wrapped", True) + + +def instrument_openai_base_client(module): + if hasattr(module, "BaseClient") and hasattr(module.BaseClient, "_process_response"): + wrap_function_wrapper(module, "BaseClient._process_response", wrap_base_client_process_response_sync) + else: + if hasattr(module, "SyncAPIClient") and hasattr(module.SyncAPIClient, "_process_response"): + wrap_function_wrapper(module, "SyncAPIClient._process_response", wrap_base_client_process_response_sync) + if hasattr(module, "AsyncAPIClient") and hasattr(module.AsyncAPIClient, "_process_response"): + wrap_function_wrapper(module, "AsyncAPIClient._process_response", wrap_base_client_process_response_async) + + +def instrument_openai_api_resources_abstract_engine_api_resource(module): + if hasattr(module, "EngineAPIResource"): + if hasattr(module.EngineAPIResource, "create"): + wrap_function_wrapper(module, "EngineAPIResource.create", wrap_engine_api_resource_create_sync) + if hasattr(module.EngineAPIResource, "acreate"): + wrap_function_wrapper(module, "EngineAPIResource.acreate", wrap_engine_api_resource_create_async) + + +def instrument_openai__streaming(module): + if hasattr(module, "Stream"): + if hasattr(module.Stream, "_iter_events"): + wrap_function_wrapper(module, "Stream._iter_events", wrap_stream_iter_events_sync) + if hasattr(module, "AsyncStream"): + if hasattr(module.AsyncStream, "_iter_events"): + wrap_function_wrapper(module, "AsyncStream._iter_events", wrap_stream_iter_events_async) diff --git a/newrelic/hooks/template_genshi.py b/newrelic/hooks/template_genshi.py index db58237fd..e4ed9aead 100644 --- a/newrelic/hooks/template_genshi.py +++ b/newrelic/hooks/template_genshi.py @@ -12,33 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import types - import newrelic.api.transaction import newrelic.common.object_wrapper import newrelic.api.function_trace + class stream_wrapper(object): def __init__(self, stream, filepath): self.__stream = stream self.__filepath = filepath + def render(self, *args, **kwargs): return newrelic.api.function_trace.FunctionTraceWrapper( - self.__stream.render, self.__filepath, - 'Template/Render')(*args, **kwargs) + self.__stream.render, self.__filepath, "Template/Render" + )(*args, **kwargs) + def __getattr__(self, name): return getattr(self.__stream, name) + def __iter__(self): return iter(self.__stream) + def __or__(self, function): return self.__stream.__or__(function) + def __str__(self): return self.__stream.__str__() + def __unicode__(self): return self.__stream.__unicode__() + def __html__(self): return self.__stream.__html__() + class wrap_template(object): def __init__(self, wrapped): if isinstance(wrapped, tuple): @@ -57,17 +64,16 @@ def __get__(self, instance, klass): def __call__(self, *args, **kwargs): current_transaction = newrelic.api.transaction.current_transaction() if current_transaction and self.__instance: - return stream_wrapper(self.__wrapped(*args, **kwargs), - self.__instance.filepath) + return stream_wrapper(self.__wrapped(*args, **kwargs), self.__instance.filepath) else: return self.__wrapped(*args, **kwargs) def __getattr__(self, name): return getattr(self.__wrapped, name) + def instrument(module): - if module.__name__ == 'genshi.template.base': + if module.__name__ == "genshi.template.base": - newrelic.common.object_wrapper.wrap_object( - module, 'Template.generate', wrap_template) + newrelic.common.object_wrapper.wrap_object(module, "Template.generate", wrap_template) diff --git a/tests/agent_features/conftest.py b/tests/agent_features/conftest.py index bd6aa6c2a..b8c8972d3 100644 --- a/tests/agent_features/conftest.py +++ b/tests/agent_features/conftest.py @@ -49,6 +49,7 @@ "test_asgi_browser.py", "test_asgi_distributed_tracing.py", "test_asgi_w3c_trace_context.py", + "test_ml_events.py", ] else: from testing_support.fixture.event_loop import event_loop diff --git a/tests/agent_features/test_configuration.py b/tests/agent_features/test_configuration.py index a75e30f58..f43b08495 100644 --- a/tests/agent_features/test_configuration.py +++ b/tests/agent_features/test_configuration.py @@ -24,6 +24,8 @@ import logging +from testing_support.fixtures import override_generic_settings + from newrelic.api.exceptions import ConfigurationError from newrelic.common.object_names import callable_name from newrelic.config import ( @@ -44,7 +46,6 @@ global_settings_dump, ) -from testing_support.fixtures import override_generic_settings def function_to_trace(): pass diff --git a/tests/agent_features/test_custom_events.py b/tests/agent_features/test_custom_events.py index 620179bfb..1951a291f 100644 --- a/tests/agent_features/test_custom_events.py +++ b/tests/agent_features/test_custom_events.py @@ -14,6 +14,7 @@ import time +import pytest from testing_support.fixtures import ( function_not_called, override_application_settings, @@ -23,10 +24,11 @@ validate_custom_event_count, validate_custom_event_in_application_stats_engine, ) +from testing_support.validators.validate_custom_events import validate_custom_events from newrelic.api.application import application_instance as application from newrelic.api.background_task import background_task -from newrelic.api.transaction import record_custom_event +from newrelic.api.transaction import current_transaction, record_custom_event from 
newrelic.core.custom_event import process_event_type @@ -128,6 +130,36 @@ def test_custom_event_inside_transaction_mixed_params(): record_custom_event("FooEvent", _mixed_params) +@override_application_settings({"custom_insights_events.max_attribute_value": 4095}) +@reset_core_stats_engine() +@validate_custom_event_in_application_stats_engine([_intrinsics, {"foo": "bar", "bar": "a" * 4095}]) +@background_task() +def test_custom_event_inside_transaction_max_attribute_value(): + record_custom_event("FooEvent", {"foo": "bar", 123: "bad key", "bar": "a" * 5000}) + + +@reset_core_stats_engine() +@validate_custom_event_in_application_stats_engine([_intrinsics, {"foo": "bar", "bar": "a" * 255}]) +@background_task() +def test_custom_event_inside_transaction_default_attribute_value(): + record_custom_event("FooEvent", {"foo": "bar", 123: "bad key", "bar": "a" * 5000}) + + +@override_application_settings({"custom_insights_events.max_attribute_value": 4095}) +@reset_core_stats_engine() +@validate_custom_event_in_application_stats_engine([_intrinsics, {"foo": "bar", "bar": "a" * 4095}]) +def test_custom_event_outside_transaction_max_attribute_value(): + app = application() + record_custom_event("FooEvent", {"foo": "bar", 123: "bad key", "bar": "a" * 5000}, application=app) + + +@reset_core_stats_engine() +@validate_custom_event_in_application_stats_engine([_intrinsics, {"foo": "bar", "bar": "a" * 255}]) +def test_custom_event_outside_transaction_default_attribute_value(): + app = application() + record_custom_event("FooEvent", {"foo": "bar", 123: "bad key", "bar": "a" * 5000}, application=app) + + @reset_core_stats_engine() @validate_custom_event_in_application_stats_engine(_event) @background_task() @@ -201,3 +233,63 @@ def test_transaction_create_custom_event_not_called(): def test_application_create_custom_event_not_called(): app = application() record_custom_event("FooEvent", _user_params, application=app) + + +# Test completeness of LLM content/input despite attribute limits being set + + +@pytest.mark.parametrize( + "event_type,event_data,expected_event_data", + ( + [ + "LlmChatCompletionMessage", + { + "content": "A" * 9001, + "input": "B" * 9001, + "foo": "b" + "a" * 9000 + "r", + }, + { + "content": "A" * 9001, + "input": "B" * 300, + "foo": "b" + "a" * 299, + }, + ], + [ + "LlmEmbedding", + { + "content": "A" * 9001, + "input": "B" * 9001, + "foo": "b" + "a" * 9000 + "r", + }, + { + "content": "A" * 300, + "input": "B" * 9001, + "foo": "b" + "a" * 299, + }, + ], + [ + "MyCustomEvent", + { + "content": "A" * 9001, + "input": "B" * 9001, + "foo": "b" + "a" * 9000 + "r", + }, + { + "content": "A" * 300, + "input": "B" * 300, + "foo": "b" + "a" * 299, + }, + ], + ), +) +def test_create_custom_event_no_limit(event_type, event_data, expected_event_data): + @reset_core_stats_engine() + @override_application_settings({"custom_insights_events.max_attribute_value": 300}) + @validate_custom_event_count(1) + @validate_custom_events([({"type": event_type}, expected_event_data)]) + @background_task() + def _test(): + transaction = current_transaction() + transaction.record_custom_event(event_type, event_data) + + _test() diff --git a/tests/agent_features/test_exception_messages.py b/tests/agent_features/test_exception_messages.py index e9944f920..55ff30cac 100644 --- a/tests/agent_features/test_exception_messages.py +++ b/tests/agent_features/test_exception_messages.py @@ -13,29 +13,38 @@ # See the License for the specific language governing permissions and # limitations under the License.
-import six import pytest +import six +from testing_support.fixtures import ( + reset_core_stats_engine, + set_default_encoding, + validate_application_exception_message, + validate_transaction_exception_message, +) from newrelic.api.application import application_instance as application from newrelic.api.background_task import background_task from newrelic.api.time_trace import notice_error -from testing_support.fixtures import (validate_transaction_exception_message, - set_default_encoding, validate_application_exception_message, - reset_core_stats_engine) - +# Turn off black formatting for this section of the code. +# While Python 2 has been EOL'd since 2020, New Relic still +# supports it and therefore these messages need to keep this +# specific formatting. +# fmt: off UNICODE_MESSAGE = u'I💜🐍' UNICODE_ENGLISH = u'I love python' BYTES_ENGLISH = b'I love python' BYTES_UTF8_ENCODED = b'I\xf0\x9f\x92\x9c\xf0\x9f\x90\x8d' INCORRECTLY_DECODED_BYTES_PY2 = u'I\u00f0\u009f\u0092\u009c\u00f0\u009f\u0090\u008d' INCORRECTLY_DECODED_BYTES_PY3 = u"b'I\\xf0\\x9f\\x92\\x9c\\xf0\\x9f\\x90\\x8d'" +# fmt: on # =================== Exception messages during transaction ==================== # ---------------- Python 2 + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() def test_py2_transaction_exception_message_unicode(): @@ -46,8 +55,9 @@ def test_py2_transaction_exception_message_unicode(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() def test_py2_transaction_exception_message_unicode_english(): @@ -58,8 +68,9 @@ def test_py2_transaction_exception_message_unicode_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() def test_py2_transaction_exception_message_bytes_english(): @@ -69,8 +80,9 @@ def test_py2_transaction_exception_message_bytes_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY2) @background_task() def test_py2_transaction_exception_message_bytes_non_english(): @@ -83,8 +95,9 @@ def test_py2_transaction_exception_message_bytes_non_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY2) @background_task() def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english(): @@ -93,16 +106,16 @@ def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english() MESSAGE IS WRONG. We do not expect it to work now, or in the future. 
""" try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() def test_py2_transaction_exception_message_unicode_utf8_encoding(): @@ -114,8 +127,9 @@ def test_py2_transaction_exception_message_unicode_utf8_encoding(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english(): @@ -123,16 +137,17 @@ def test_py2_transaction_exception_message_bytes_utf8_encoding_non_english(): encoding is also utf-8. """ try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: notice_error() + # ---------------- Python 3 + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(UNICODE_MESSAGE) @background_task() @@ -144,6 +159,7 @@ def test_py3_transaction_exception_message_bytes_non_english_unicode(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(UNICODE_ENGLISH) @background_task() @@ -155,6 +171,7 @@ def test_py3_transaction_exception_message_unicode_english(): except ValueError: notice_error() + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @validate_transaction_exception_message(INCORRECTLY_DECODED_BYTES_PY3) @background_task() @@ -171,13 +188,15 @@ def test_py3_transaction_exception_message_bytes_non_english(): except ValueError: notice_error() + # =================== Exception messages outside transaction ==================== # ---------------- Python 2 + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(UNICODE_MESSAGE) def test_py2_application_exception_message_unicode(): """Assert unicode message when using non-ascii characters is preserved, @@ -188,9 +207,10 @@ def test_py2_application_exception_message_unicode(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(UNICODE_ENGLISH) def test_py2_application_exception_message_unicode_english(): """Assert unicode message when using ascii compatible characters preserved, @@ -201,9 +221,10 @@ def test_py2_application_exception_message_unicode_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(UNICODE_ENGLISH) def test_py2_application_exception_message_bytes_english(): """Assert byte string of ascii characters decodes sensibly""" @@ -213,9 +234,10 @@ def test_py2_application_exception_message_bytes_english(): app = application() 
notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY2) def test_py2_application_exception_message_bytes_non_english(): """Assert known situation where (explicitly) utf-8 encoded byte string gets @@ -228,9 +250,10 @@ def test_py2_application_exception_message_bytes_non_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('ascii') +@set_default_encoding("ascii") @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY2) def test_py2_application_exception_message_bytes_implicit_encoding_non_english(): """Assert known situation where (implicitly) utf-8 encoded byte string gets @@ -238,18 +261,18 @@ def test_py2_application_exception_message_bytes_implicit_encoding_non_english() MESSAGE IS WRONG. We do not expect it to work now, or in the future. """ try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_application_exception_message(UNICODE_MESSAGE) def test_py2_application_exception_message_unicode_utf8_encoding(): """Assert unicode error message is preserved with sys non-default utf-8 @@ -261,26 +284,28 @@ def test_py2_application_exception_message_unicode_utf8_encoding(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY3, reason="Testing Python 2 string behavior") @reset_core_stats_engine() -@set_default_encoding('utf-8') +@set_default_encoding("utf-8") @validate_application_exception_message(UNICODE_MESSAGE) def test_py2_application_exception_message_bytes_utf8_encoding_non_english(): """Assert utf-8 encoded byte produces correct exception message when sys encoding is also utf-8. 
""" try: - # Bytes literal with non-ascii compatible characters only allowed in # python 2 - raise ValueError('I💜🐍') + raise ValueError("I💜🐍") except ValueError: app = application() notice_error(application=app) + # ---------------- Python 3 + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(UNICODE_MESSAGE) @@ -293,6 +318,7 @@ def test_py3_application_exception_message_bytes_non_english_unicode(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(UNICODE_ENGLISH) @@ -305,6 +331,7 @@ def test_py3_application_exception_message_unicode_english(): app = application() notice_error(application=app) + @pytest.mark.skipif(six.PY2, reason="Testing Python 3 string behavior") @reset_core_stats_engine() @validate_application_exception_message(INCORRECTLY_DECODED_BYTES_PY3) @@ -321,3 +348,15 @@ def test_py3_application_exception_message_bytes_non_english(): except ValueError: app = application() notice_error(application=app) + + +@reset_core_stats_engine() +@validate_application_exception_message("My custom message") +def test_nr_message_exception_attr_override(): + """Override the message using the _nr_message attribute.""" + try: + raise ValueError("Original error message") + except ValueError as e: + e._nr_message = "My custom message" + app = application() + notice_error(application=app) diff --git a/tests/agent_features/test_high_security_mode.py b/tests/agent_features/test_high_security_mode.py index 489228d94..ae64be2ef 100644 --- a/tests/agent_features/test_high_security_mode.py +++ b/tests/agent_features/test_high_security_mode.py @@ -85,6 +85,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, { "high_security": False, @@ -96,6 +97,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, { "high_security": False, @@ -107,6 +109,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "high_security": False, @@ -118,6 +121,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, ] @@ -132,6 +136,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "high_security": True, @@ -143,6 +148,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "high_security": True, @@ -154,6 +160,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, 
"application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "high_security": True, @@ -165,6 +172,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, { "high_security": True, @@ -176,6 +184,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, { "high_security": True, @@ -187,6 +196,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, { "high_security": True, @@ -198,6 +208,7 @@ def test_hsm_configuration_default(): "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, ] @@ -224,7 +235,7 @@ def test_local_config_file_override_hsm_disabled(settings): original_message_segment_params_enabled = settings.message_tracer.segment_parameters_enabled original_application_logging_forwarding_enabled = settings.application_logging.forwarding.enabled original_machine_learning_inference_event_value_enabled = settings.machine_learning.inference_events_value.enabled - + original_ai_monitoring_enabled = settings.ai_monitoring.enabled apply_local_high_security_mode_setting(settings) assert settings.capture_params == original_capture_params @@ -238,6 +249,7 @@ def test_local_config_file_override_hsm_disabled(settings): settings.machine_learning.inference_events_value.enabled == original_machine_learning_inference_event_value_enabled ) + assert settings.ai_monitoring.enabled == original_ai_monitoring_enabled @parameterize_hsm_local_config(_hsm_local_config_file_settings_enabled) @@ -252,6 +264,7 @@ def test_local_config_file_override_hsm_enabled(settings): assert settings.message_tracer.segment_parameters_enabled is False assert settings.application_logging.forwarding.enabled is False assert settings.machine_learning.inference_events_value.enabled is False + assert settings.ai_monitoring.enabled is False _server_side_config_settings_hsm_disabled = [ @@ -265,6 +278,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "agent_config": { @@ -275,6 +289,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, }, ), @@ -288,6 +303,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, { "agent_config": { @@ -298,6 +314,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, 
"machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, }, ), @@ -314,6 +331,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "high_security": True, @@ -324,6 +342,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, "agent_config": { "capture_params": False, "transaction_tracer.record_sql": "obfuscated", @@ -332,6 +351,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, }, ), @@ -345,6 +365,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, }, { "high_security": True, @@ -355,6 +376,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, "machine_learning.inference_events_value.enabled": False, + "ai_monitoring.enabled": False, "agent_config": { "capture_params": True, "transaction_tracer.record_sql": "raw", @@ -363,6 +385,7 @@ def test_local_config_file_override_hsm_enabled(settings): "ml_insights_events.enabled": True, "application_logging.forwarding.enabled": True, "machine_learning.inference_events_value.enabled": True, + "ai_monitoring.enabled": True, }, }, ), @@ -385,6 +408,7 @@ def test_remote_config_fixups_hsm_disabled(local_settings, server_settings): original_ml_events = agent_config["ml_insights_events.enabled"] original_log_forwarding = agent_config["application_logging.forwarding.enabled"] original_machine_learning_events = agent_config["machine_learning.inference_events_value.enabled"] + original_ai_monitoring = agent_config["ai_monitoring.enabled"] _settings = global_settings() settings = override_generic_settings(_settings, local_settings)(AgentProtocol._apply_high_security_mode_fixups)( @@ -402,6 +426,7 @@ def test_remote_config_fixups_hsm_disabled(local_settings, server_settings): assert agent_config["ml_insights_events.enabled"] == original_ml_events assert agent_config["application_logging.forwarding.enabled"] == original_log_forwarding assert agent_config["machine_learning.inference_events_value.enabled"] == original_machine_learning_events + assert agent_config["ai_monitoring.enabled"] == original_ai_monitoring @pytest.mark.parametrize("local_settings,server_settings", _server_side_config_settings_hsm_enabled) @@ -426,6 +451,7 @@ def test_remote_config_fixups_hsm_enabled(local_settings, server_settings): assert "ml_insights_events.enabled" not in settings assert "application_logging.forwarding.enabled" not in settings assert "machine_learning.inference_events_value.enabled" not in settings + assert "ai_monitoring.enabled" not in settings assert "capture_params" not in agent_config assert "transaction_tracer.record_sql" not in agent_config @@ -434,6 +460,7 @@ def test_remote_config_fixups_hsm_enabled(local_settings, server_settings): assert "ml_insights_events.enabled" not in 
agent_config assert "application_logging.forwarding.enabled" not in agent_config assert "machine_learning.inference_events_value.enabled" not in agent_config + assert "ai_monitoring.enabled" not in agent_config def test_remote_config_hsm_fixups_server_side_disabled(): diff --git a/tests/agent_features/test_llm_token_count_callback.py b/tests/agent_features/test_llm_token_count_callback.py new file mode 100644 index 000000000..3d923fe84 --- /dev/null +++ b/tests/agent_features/test_llm_token_count_callback.py @@ -0,0 +1,69 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from newrelic.api.application import application_instance as application +from newrelic.api.ml_model import set_llm_token_count_callback + + +def test_unset_llm_token_count_callback(): + settings = application().settings + + set_llm_token_count_callback(lambda model, content: 45) + + assert callable(settings.ai_monitoring.llm_token_count_callback) + + set_llm_token_count_callback(None) + + assert settings.ai_monitoring.llm_token_count_callback is None + + +@pytest.mark.parametrize( + "set_args,call_args,expected_value", + [ + ((lambda model, content: 45,), ("model", "content"), 45), + ((lambda model, content: 45, application().settings), ("model", "content"), 45), + ((lambda model, content: 1.1,), ("model", "content"), None), + ((lambda model, content: -1,), ("model", "content"), None), + ((lambda model, content: 45,), (None, "content"), None), + ((lambda model, content: 45,), ("model", None), None), + ], +) +def test_set_llm_token_count_callback(set_args, call_args, expected_value): + settings = application().settings + + set_llm_token_count_callback(*set_args) + + assert settings.ai_monitoring.llm_token_count_callback(*call_args) == expected_value + + +def test_exception_in_user_callback(): + settings = application().settings + + def user_exc(model, content): + raise TypeError() + + set_llm_token_count_callback(user_exc) + + with pytest.raises(TypeError): + settings.ai_monitoring.llm_token_count_callback("model", "content") + + +def test_with_application_not_active(): + settings = application(activate=False).settings + + set_llm_token_count_callback(lambda model, content: 45) + + assert settings.ai_monitoring.llm_token_count_callback("model", "content") == 45 diff --git a/tests/agent_features/test_ml_events.py b/tests/agent_features/test_ml_events.py index 60797375d..96bb95f95 100644 --- a/tests/agent_features/test_ml_events.py +++ b/tests/agent_features/test_ml_events.py @@ -58,10 +58,41 @@ def core_app(collector_agent_registration): @validate_ml_event_payload( - [{"foo": "bar", "real_agent_id": "1234567", "event.domain": "newrelic.ml_events", "event.name": "InferenceEvent"}] + { + "apm": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "MyCustomEvent", + } + ] + } ) @reset_core_stats_engine() -def test_ml_event_payload_inside_transaction(core_app): +def test_ml_event_payload_noninference_event_inside_transaction(core_app): + 
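+    # Events other than InferenceEvent are delivered in the standard APM event
+    # payload ("apm"); InferenceEvent records are routed to the dedicated inference
+    # payload, as the validate_ml_event_payload dicts in these tests assert.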
@background_task(name="test_ml_event_payload_inside_transaction") + def _test(): + record_ml_event("MyCustomEvent", {"foo": "bar"}) + + _test() + core_app.harvest() + + +@validate_ml_event_payload( + { + "inference": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "InferenceEvent", + } + ] + } +) +@reset_core_stats_engine() +def test_ml_event_payload_inference_event_inside_transaction(core_app): @background_task(name="test_ml_event_payload_inside_transaction") def _test(): record_ml_event("InferenceEvent", {"foo": "bar"}) @@ -71,13 +102,106 @@ def _test(): @validate_ml_event_payload( - [{"foo": "bar", "real_agent_id": "1234567", "event.domain": "newrelic.ml_events", "event.name": "InferenceEvent"}] + { + "apm": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "MyCustomEvent", + } + ], + "inference": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "InferenceEvent", + } + ], + } ) @reset_core_stats_engine() -def test_ml_event_payload_outside_transaction(core_app): +def test_ml_event_payload_both_events_inside_transaction(core_app): + @background_task(name="test_ml_event_payload_inside_transaction") + def _test(): + record_ml_event("InferenceEvent", {"foo": "bar"}) + record_ml_event("MyCustomEvent", {"foo": "bar"}) + + _test() + core_app.harvest() + + +@validate_ml_event_payload( + { + "inference": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "InferenceEvent", + } + ] + } +) +@reset_core_stats_engine() +def test_ml_event_payload_inference_event_outside_transaction(core_app): + def _test(): + app = application() + record_ml_event("InferenceEvent", {"foo": "bar"}, application=app) + + _test() + core_app.harvest() + + +@validate_ml_event_payload( + { + "apm": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "MyCustomEvent", + } + ], + "inference": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "InferenceEvent", + } + ], + } +) +@reset_core_stats_engine() +def test_ml_event_payload_both_events_outside_transaction(core_app): def _test(): app = application() record_ml_event("InferenceEvent", {"foo": "bar"}, application=app) + record_ml_event("MyCustomEvent", {"foo": "bar"}, application=app) + + _test() + core_app.harvest() + + +@validate_ml_event_payload( + { + "apm": [ + { + "foo": "bar", + "real_agent_id": "1234567", + "event.domain": "newrelic.ml_events", + "event.name": "MyCustomEvent", + } + ] + } +) +@reset_core_stats_engine() +def test_ml_event_payload_noninference_event_outside_transaction(core_app): + def _test(): + app = application() + record_ml_event("MyCustomEvent", {"foo": "bar"}, application=app) _test() core_app.harvest() @@ -102,6 +226,62 @@ def _test(): _test() +@reset_core_stats_engine() +def test_record_ml_event_truncation_inside_transaction(): + @validate_ml_events([(_intrinsics, {"a": "a" * 4095})]) + @background_task() + def _test(): + record_ml_event("LabelEvent", {"a": "a" * 4100}) + + _test() + + +@reset_core_stats_engine() +def test_record_ml_event_truncation_outside_transaction(): + @validate_ml_events_outside_transaction([(_intrinsics, {"a": "a" * 4095})]) + def _test(): + app = application() + record_ml_event("LabelEvent", {"a": "a" * 4100}, application=app) + + _test() + + +@reset_core_stats_engine() +def 
test_record_ml_event_max_num_attrs(): + too_many_attrs_event = {} + for i in range(65): + too_many_attrs_event[str(i)] = str(i) + + max_attrs_event = {} + for i in range(64): + max_attrs_event[str(i)] = str(i) + + @validate_ml_events([(_intrinsics, max_attrs_event)]) + @background_task() + def _test(): + record_ml_event("LabelEvent", too_many_attrs_event) + + _test() + + +@reset_core_stats_engine() +def test_record_ml_event_max_num_attrs_outside_transaction(): + too_many_attrs_event = {} + for i in range(65): + too_many_attrs_event[str(i)] = str(i) + + max_attrs_event = {} + for i in range(64): + max_attrs_event[str(i)] = str(i) + + @validate_ml_events_outside_transaction([(_intrinsics, max_attrs_event)]) + def _test(): + app = application() + record_ml_event("LabelEvent", too_many_attrs_event, application=app) + + _test() + + @pytest.mark.parametrize( "params,expected", [ diff --git a/tests/agent_features/test_record_llm_feedback_event.py b/tests/agent_features/test_record_llm_feedback_event.py new file mode 100644 index 000000000..e61c7b530 --- /dev/null +++ b/tests/agent_features/test_record_llm_feedback_event.py @@ -0,0 +1,88 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from testing_support.fixtures import ( + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.validators.validate_custom_events import validate_custom_events + +from newrelic.api.background_task import background_task +from newrelic.api.ml_model import record_llm_feedback_event + + +@reset_core_stats_engine() +def test_record_llm_feedback_event_all_args_supplied(): + llm_feedback_all_args_recorded_events = [ + ( + {"type": "LlmFeedbackMessage"}, + { + "id": None, + "trace_id": "123456789abcdefgh", + "category": "informative", + "rating": 1, + "ingest_source": "Python", + "message": "message", + "foo": "bar", + }, + ), + ] + + @validate_custom_events(llm_feedback_all_args_recorded_events) + @background_task() + def _test(): + record_llm_feedback_event( + rating=1, + trace_id="123456789abcdefgh", + category="informative", + message="message", + # Add metadata key with same name as built-in event key to verify no override occurs in the event + metadata={"foo": "bar", "message": "custom-message"}, + ) + + _test() + + +@reset_core_stats_engine() +def test_record_llm_feedback_event_required_args_supplied(): + llm_feedback_required_args_recorded_events = [ + ( + {"type": "LlmFeedbackMessage"}, + { + "id": None, + "rating": "Good", + "trace_id": "123456789abcdefgh", + "ingest_source": "Python", + }, + ), + ] + + @validate_custom_events(llm_feedback_required_args_recorded_events) + @background_task() + def _test(): + record_llm_feedback_event(trace_id="123456789abcdefgh", rating="Good") + + _test() + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_record_llm_feedback_event_outside_txn(): + record_llm_feedback_event( + rating="Good", + trace_id="123456789abcdefgh", + category="informative", + message="message", + metadata={"foo": 
"bar"}, + ) diff --git a/tests/agent_unittests/test_agent_connect.py b/tests/agent_unittests/test_agent_connect.py index eb944c072..ac5b7fd3f 100644 --- a/tests/agent_unittests/test_agent_connect.py +++ b/tests/agent_unittests/test_agent_connect.py @@ -13,26 +13,27 @@ # limitations under the License. import pytest +from testing_support.fixtures import failing_endpoint, override_generic_settings +from testing_support.validators.validate_internal_metrics import ( + validate_internal_metrics, +) + from newrelic.core.application import Application from newrelic.core.config import global_settings from newrelic.network.exceptions import ForceAgentDisconnect -from testing_support.fixtures import ( - override_generic_settings, - failing_endpoint -) -from testing_support.validators.validate_internal_metrics import validate_internal_metrics - - SETTINGS = global_settings() -@override_generic_settings(SETTINGS, { - 'developer_mode': True, -}) -@failing_endpoint('preconnect', raises=ForceAgentDisconnect) +@override_generic_settings( + SETTINGS, + { + "developer_mode": True, + }, +) +@failing_endpoint("preconnect", raises=ForceAgentDisconnect) def test_http_gone_stops_connect(): - app = Application('Python Agent Test (agent_unittests-connect)') + app = Application("Python Agent Test (agent_unittests-connect)") app.connect_to_data_collector(None) # The agent must not reattempt a connection after a ForceAgentDisconnect. @@ -48,28 +49,55 @@ def test_http_gone_stops_connect(): ] -@override_generic_settings(SETTINGS, { - 'developer_mode': True, -}) +@override_generic_settings( + SETTINGS, + { + "developer_mode": True, + }, +) @pytest.mark.parametrize("feature_setting,subfeature_setting", _logging_settings_matrix) def test_logging_connect_supportability_metrics(feature_setting, subfeature_setting): metric_value = "enabled" if feature_setting and subfeature_setting else "disabled" - @override_generic_settings(SETTINGS, { - "application_logging.enabled": feature_setting, - "application_logging.forwarding.enabled": subfeature_setting, - "application_logging.metrics.enabled": subfeature_setting, - "application_logging.local_decorating.enabled": subfeature_setting, - }) - @validate_internal_metrics([ - ("Supportability/Logging/Forwarding/Python/%s" % metric_value, 1), - ("Supportability/Logging/LocalDecorating/Python/%s" % metric_value, 1), - ("Supportability/Logging/Metrics/Python/%s" % metric_value, 1), - ]) + @override_generic_settings( + SETTINGS, + { + "application_logging.enabled": feature_setting, + "application_logging.forwarding.enabled": subfeature_setting, + "application_logging.metrics.enabled": subfeature_setting, + "application_logging.local_decorating.enabled": subfeature_setting, + }, + ) + @validate_internal_metrics( + [ + ("Supportability/Logging/Forwarding/Python/%s" % metric_value, 1), + ("Supportability/Logging/LocalDecorating/Python/%s" % metric_value, 1), + ("Supportability/Logging/Metrics/Python/%s" % metric_value, 1), + ] + ) def test(): - app = Application('Python Agent Test (agent_unittests-connect)') + app = Application("Python Agent Test (agent_unittests-connect)") app.connect_to_data_collector(None) assert app._active_session - + test() + + +@override_generic_settings( + SETTINGS, + { + "developer_mode": True, + "ai_monitoring.streaming.enabled": False, + }, +) +@validate_internal_metrics( + [ + ("Supportability/Python/ML/Streaming/Disabled", 1), + ] +) +def test_ml_streaming_disabled_supportability_metrics(): + app = Application("Python Agent Test (agent_unittests-connect)") + 
app.connect_to_data_collector(None) + + assert app._active_session diff --git a/tests/external_botocore/_mock_bedrock_encoding_utils.py b/tests/external_botocore/_mock_bedrock_encoding_utils.py new file mode 100644 index 000000000..36bdbebe8 --- /dev/null +++ b/tests/external_botocore/_mock_bedrock_encoding_utils.py @@ -0,0 +1,84 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Utilities for encoding and decoding streaming payloads from Bedrock. """ +import base64 +import binascii +import json + + +def crc(b): + """Encode the crc32 of the bytes stream into a 4 byte sequence.""" + return int_to_escaped_bytes(binascii.crc32(b), 4) + + +def int_to_escaped_bytes(i, num_bytes=1): + """Convert an integer into an arbitrary number of bytes.""" + return bytes.fromhex(("{:0" + str(num_bytes * 2) + "x}").format(i)) + + +def encode_headers(headers): + """Encode a dictionary of headers into bedrock's binary format.""" + new_headers = [] + for h, v in headers.items(): + if not h.startswith(":"): + h = ":%s" % h + h = h.encode("utf-8") + v = v.encode("utf-8") + new_headers.append(b"".join((int_to_escaped_bytes(len(h)), h, b"\x07\x00", int_to_escaped_bytes(len(v)), v))) + return b"".join(new_headers) + + +def decode_body(body): + """Decode the mixed JSON and base64 encoded body of a streaming response into a dictionary.""" + body = body.decode("utf-8") + body = json.loads(body) + body = body["bytes"] + body = base64.b64decode(body) + body = body.decode("utf-8") + return json.loads(body) + + +def encode_body(body, malformed_body=False): + """Encode a dictionary body into JSON, base64, then JSON again under a bytes key.""" + + body = json.dumps(body, separators=(",", ":")) + if malformed_body: + # Remove characters from end of body to make it unreadable + body = body[:-4] + + body = body.encode("utf-8") + body = base64.b64encode(body) + body = body.decode("utf-8") + body = {"bytes": body} + body = json.dumps(body, separators=(",", ":")) + body = body.encode("utf-8") + return body + + +def encode_streaming_payload(headers, body, malformed_body=False): + """Encode dictionary headers and dictionary body into bedrock's binary payload format including calculated lengths and CRC32.""" + headers = encode_headers(headers) + body = encode_body(body, malformed_body=malformed_body) + + header_length = len(headers) + payload_length = len(body) + total_length = 16 + payload_length + header_length + + prelude = int_to_escaped_bytes(total_length, 4) + int_to_escaped_bytes(header_length, 4) + prelude_crc = crc(prelude) + + payload = prelude + prelude_crc + headers + body + payload_crc = crc(payload) + + return payload + payload_crc diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py new file mode 100644 index 000000000..9f28a6f11 --- /dev/null +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -0,0 +1,6626 @@ +# Copyright 2010 New Relic, Inc. 
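+# +# The hard-coded streamed responses below are hex-encoded eventstream frames in
+# the format produced by _mock_bedrock_encoding_utils.encode_streaming_payload.
+# As an illustrative sketch (not part of this module), one frame can be rebuilt
+# and its model JSON recovered with those same helpers:
+#
+#     from _mock_bedrock_encoding_utils import decode_body, encode_streaming_payload
+#
+#     frame = encode_streaming_payload(
+#         {"event-type": "chunk", "content-type": "application/json", "message-type": "event"},
+#         {"outputText": "Hello", "index": 0},
+#     )
+#     header_length = int.from_bytes(frame[4:8], "big")
+#     body = frame[12 + header_length:-4]  # skip prelude and its CRC32, drop trailing CRC32
+#     assert decode_body(body) == {"outputText": "Hello", "index": 0}
+#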
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import re + +from _mock_bedrock_encoding_utils import encode_streaming_payload +from testing_support.mock_external_http_server import MockExternalHTTPServer + +# This defines an external server test apps can make requests to instead of +# the real Bedrock backend. This provides 3 features: +# +# 1) This removes dependencies on external websites. +# 2) Provides a better mechanism for making an external call in a test app than +# simple calling another endpoint the test app makes available because this +# server will not be instrumented meaning we don't have to sort through +# transactions to separate the ones created in the test app and the ones +# created by an external call. +# 3) This app runs on a separate thread meaning it won't block the test app. + +STREAMED_RESPONSES = { + "amazon.titan-text-express-v1::User: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "884db5c9-18ab-4f27-8892-33656176a2e6", + }, + 200, + [ + "000001d30000004b8125915d0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a7664585277645852555a586830496a6f694945686c624778764c43426f623363675932467549456b676147567363434235623355676447396b59586b2f496977696157356b5a5867694f6a4173496e527664474673543356306348563056475634644652766132567551323931626e51694f6a45784c434a6a623231776247563061573975556d566863323975496a6f69526b6c4f53564e494969776961573577645852555a586830564739725a57354462335675644349364e7a4973496d467459587076626931695a57527962324e724c576c75646d396a595852706232354e5a58527961574e7a496a7037496d6c7563485630564739725a57354462335675644349364e7a4973496d393164484231644652766132567551323931626e51694f6a45784c434a70626e5a765932463061573975544746305a57356a655349364e7a59774c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f334e6a423966513d3d227db357f684" + ], + ], + "anthropic.claude-instant-v1::The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "1a72a1f6-310f-469c-af1d-2c59eb600089", + }, + 200, + [ + "000001a70000004b8d77d7520b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56397a6447467964434973496d316c63334e685a3255694f6e7369615751694f694a6a62323177624638774d56684b4e315a4c4d3052574e304931556e4a6c516e7077595649795a4845694c434a306558426c496a6f696257567a6332466e5a534973496e4a76624755694f694a6863334e7063335268626e51694c434a6a623235305a573530496a7062585377696257396b5a5777694f694a6a624746315a4755746157357a64474675644330784c6a49694c434a7a6447397758334a6c59584e7662694936626e567362437769633352766346397a5a5846315a57356a5a534936626e56736243776964584e685a3255694f6e73696157357764585266644739725a57357a496a6f334d79776962335630634856305833527661325675637949364d58313966513d3d227dd65d4fce", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a6447467964434973496d6c755a475634496a6f774c434a6a623235305a57353058324a7362324e72496a7037496e5235634755694f694a305a58683049697769644756346443493649694a3966513d3d227dcdbf661d", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694a495a57787362794a3966513d3d227d335563af", + "000000d30000004b4a7942f80b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694968496e3139227d2d9e8eb6", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496754586b696658303d227d0d0604c6", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967626d46745a534a3966513d3d227d625a1a55", + 
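+        # Each hex string in this list is one complete eventstream frame: 4-byte total
+        # length, 4-byte header length, prelude CRC32, encoded headers, a JSON
+        # {"bytes": <base64>} body, and a trailing payload CRC32.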
"000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496761584d696658303d227dbcc5266b", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967513278686457526c496e3139227dafd2aab1", + "000000d30000004b4a7942f80b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694975496e3139227d2b9773b8", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694a63626c787553446f696658303d227d796313c0", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967546d6c6a5a534a3966513d3d227d3fc8c903", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967644738696658303d227dcd25ff7b", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949676257566c64434a3966513d3d227d9daba9d4", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496765573931496e3139227d35994406", + 
"000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967513278686457526c496e3139227dafd2aab1", + "000000d30000004b4a7942f80b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694975496e3139227d2b9773b8", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496751324675496e3139227d874170f9", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496765573931496e3139227d35994406", + "000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949676447567362434a3966513d3d227de17e04f2", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967625755696658303d227d27157827", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496759534a3966513d3d227d93de2078", + "000000d70000004bbff9e4380b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967596d6c30496e3139227d47f89aea", + 
"000000db0000004b7a0909390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496759574a76645851696658303d227defe54875", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496765573931636e4e6c624759696658303d227d2c5dd674", + "000000d30000004b4a7942f80b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69492f496e3139227dac32c541", + "0000009b0000004b22fa51700b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a64473977496977696157356b5a5867694f6a4239227dc0567ebe", + ], + ], + "meta.llama2-13b-chat-v1::[INST] The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "cce6b34c-812c-4f97-8885-515829aa9639", + }, + 200, + [ + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a62335675644349364e7a5973496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4573496e4e3062334266636d566863323975496a70756457787366513d3d227d37a74e44", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694945686c624778764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f794c434a7a6447397758334a6c59584e7662694936626e56736248303d227d82bd6987", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d79776963335276634639795a57467a623234694f6d353162477839227d69a22395", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949456c304969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f304c434a7a6447397758334a6c59584e7662694936626e56736248303d227d0c311931", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694a794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e53776963335276634639795a57467a623234694f6d353162477839227d208768c0", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6963794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e69776963335276634639795a57467a623234694f6d353162477839227da857c5f9", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494764795a5746304969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f334c434a7a6447397758334a6c59584e7662694936626e56736248303d227d1c1ffb32", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f344c434a7a6447397758334a6c59584e7662694936626e56736248303d227dc6a01d05", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949485268624773694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a6b73496e4e3062334266636d566863323975496a70756457787366513d3d227d1ade656f", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f784d43776963335276634639795a57467a623234694f6d353162477839227df38e415b", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486c7664534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d544573496e4e3062334266636d566863323975496a70756457787366513d3d227df8043fc6", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d544973496e4e3062334266636d566863323975496a70756457787366513d3d227da3876c32", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949456b694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a457a4c434a7a6447397758334a6c59584e7662694936626e56736248303d227df2429ce3", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694a794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d545173496e4e3062334266636d566863323975496a70756457787366513d3d227d3e44d0ad", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6962534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d545573496e4e3062334266636d566863323975496a70756457787366513d3d227d43dcbe46", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494746754969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f784e69776963335276634639795a57467a623234694f6d353162477839227ddefd896e", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494545694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a45334c434a7a6447397758334a6c59584e7662694936626e56736248303d227d865b949f", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6953534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d546773496e4e3062334266636d566863323975496a70756457787366513d3d227d87c627db", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d546b73496e4e3062334266636d566863323975496a70756457787366513d3d227d4efcc97d", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494746755a434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4173496e4e3062334266636d566863323975496a70756457787366513d3d227dc9fb2b36", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949456b694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49784c434a7a6447397758334a6c59584e7662694936626e56736248303d227d7d13633f", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694a794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4973496e4e3062334266636d566863323975496a70756457787366513d3d227dabef6b74", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6962534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4d73496e4e3062334266636d566863323975496a70756457787366513d3d227dd677059f", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947686c636d55694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49304c434a7a6447397758334a6c59584e7662694936626e56736248303d227d778b6773", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f794e53776963335276634639795a57467a623234694f6d353162477839227d3f000197", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947686c624841694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49324c434a7a6447397758334a6c59584e7662694936626e56736248303d227d262f2f7c", + "000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494746756333646c63694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a6373496e4e3062334266636d566863323975496a70756457787366513d3d227dd2632962", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947467565534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a6773496e4e3062334266636d566863323975496a70756457787366513d3d227d2ca266eb", + 
"000000ef0000004b2ea8177f0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494846315a584e306157397563794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a6b73496e4e3062334266636d566863323975496a70756457787366513d3d227d420dc939", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486c7664534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d7a4173496e4e3062334266636d566863323975496a70756457787366513d3d227d3ba3c73a", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494731705a3268304969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f7a4d53776963335276634639795a57467a623234694f6d353162477839227d742bb4c0", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949476868646d55694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d794c434a7a6447397758334a6c59584e7662694936626e56736248303d227d3ba80967", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d7a4d73496e4e3062334266636d566863323975496a70756457787366513d3d227d7d7ec395", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694946646f595851694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d304c434a7a6447397758334a6c59584e7662694936626e56736248303d227d2aff1e3e", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694a794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d7a5573496e4e3062334266636d566863323975496a70756457787366513d3d227dfde32851", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6963794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d7a5973496e4e3062334266636d566863323975496a70756457787366513d3d227de1891392", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494739754969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f7a4e79776963335276634639795a57467a623234694f6d353162477839227d1923b2d1", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486c76645849694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d344c434a7a6447397758334a6c59584e7662694936626e56736248303d227d0475ce8e", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949473170626d51694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d354c434a7a6447397758334a6c59584e7662694936626e56736248303d227d39ee9d93", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6950794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e444173496e4e3062334266636d566863323975496a70756457787366513d3d227d23f8fbfc", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e444573496e4e3062334266636d566863323975496a70756457787366513d3d227d034c8c7e", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f304d69776963335276634639795a57467a623234694f6d353162477839227dd1325fa5", + 
"000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f304d79776963335276634639795a57467a623234694f6d353162477839227d57fad2a0", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f304e43776963335276634639795a57467a623234694f6d353162477839227d2ba0111c", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69384a2b6b6c434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e445573496e4e3062334266636d566863323975496a70756457787366513d3d227d5170fd7c", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494552764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f304e69776963335276634639795a57467a623234694f6d353162477839227d20bec256", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486c7664534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e446373496e4e3062334266636d566863323975496a70756457787366513d3d227d3fb405c3", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949476868646d55694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a51344c434a7a6447397758334a6c59584e7662694936626e56736248303d227df9307c2f", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494745694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a51354c434a7a6447397758334a6c59584e7662694936626e56736248303d227daac7d4f1", + 
"000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949484e775a574e705a6d6c6a4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f314d43776963335276634639795a57467a623234694f6d353162477839227d8320a489", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694948527663476c6a4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f314d53776963335276634639795a57467a623234694f6d353162477839227d8422eb4f", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486c7664534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e544973496e4e3062334266636d566863323975496a70756457787366513d3d227d071ce2e2", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694a794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e544d73496e4e3062334266636d566863323975496a70756457787366513d3d227ddc056360", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f695a434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e545173496e4e3062334266636d566863323975496a70756457787366513d3d227d41081c0a", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949477870613255694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a55314c434a7a6447397758334a6c59584e7662694936626e56736248303d227d25fd8278", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f314e69776963335276634639795a57467a623234694f6d353162477839227d7fdc9979", + 
"000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947527063324e3163334d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a55334c434a7a6447397758334a6c59584e7662694936626e56736248303d227dbdcf1f52", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e546773496e4e3062334266636d566863323975496a70756457787366513d3d227db1e41459", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494739794969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f314f53776963335276634639795a57467a623234694f6d353162477839227d95ce8435", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494752764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f324d43776963335276634639795a57467a623234694f6d353162477839227da1fc1b06", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486c7664534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a4573496e4e3062334266636d566863323975496a70756457787366513d3d227dfe642df8", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949477031633351694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a59794c434a7a6447397758334a6c59584e7662694936626e56736248303d227d87d3b447", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949486468626e51694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a597a4c434a7a6447397758334a6c59584e7662694936626e56736248303d227db7527265", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f324e43776963335276634639795a57467a623234694f6d353162477839227dd2c7cbab", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949474e6f595851694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a59314c434a7a6447397758334a6c59584e7662694936626e56736248303d227d20af0e31", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6950794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a5973496e4e3062334266636d566863323975496a70756457787366513d3d227d6f984397", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a6373496e4e3062334266636d566863323975496a70756457787366513d3d227dc29ca445", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f324f43776963335276634639795a57467a623234694f6d353162477839227d0a0f23b6", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f324f53776963335276634639795a57467a623234694f6d353162477839227d8cc7aeb3", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f334d43776963335276634639795a57467a623234694f6d353162477839227d7c290d77", + 
"000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69384a2b5372434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a4573496e4e3062334266636d566863323975496a70756457787366513d3d227dc3364864", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949456b694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63794c434a7a6447397758334a6c59584e7662694936626e56736248303d227d92976f7d", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694a794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a4d73496e4e3062334266636d566863323975496a70756457787366513d3d227d6e2881b6", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6962534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a5173496e4e3062334266636d566863323975496a70756457787366513d3d227dedfdb5e0", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947467362434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a5573496e4e3062334266636d566863323975496a70756457787366513d3d227dc682e026", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949475668636e4d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63324c434a7a6447397758334a6c59584e7662694936626e56736248303d227d8ee6d357", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a6373496e4e3062334266636d566863323975496a70756457787366513d3d227d9d4f992d", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a6773496e4e3062334266636d566863323975496a70756457787366513d3d227d6addbddd", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f334f53776963335276634639795a57467a623234694f6d353162477839227d82204662", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f344d43776963335276634639795a57467a623234694f6d353162477839227d549e9740", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f344d53776963335276634639795a57467a623234694f6d353162477839227dd2561a45", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69384a2b5267694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f444973496e4e3062334266636d566863323975496a70756457787366513d3d227da5139607", + "0000018f0000004b7cc6b3970b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f344d79776963335276634639795a57467a623234694f694a7a644739774969776959573168656d39754c574a6c5a484a76593273746157353262324e6864476c76626b316c64484a7059334d694f6e736961573577645852556232746c626b4e7664573530496a6f334e6977696233563063485630564739725a57354462335675644349364f444d73496d6c75646d396a595852706232354d5958526c626d4e35496a6f794d7a63314c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f7a4e6a5a3966513d3d227d34e01b75", + ], + ], + "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "b427270f-371a-458d-81b6-a05aafb2704c", + }, + 200, + [ + 
"000002370000004bdae582ec0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a7664585277645852555a586830496a6f69584734784947526c5a334a6c5a534247595768795a57356f5a576c3049476c7a494441754e5459675a47566e636d566c637942445a57787a6158567a4c694255614756795a575a76636d5573494449784d69426b5a5764795a575567526d466f636d56756147567064434270626942445a57787a6158567a494864766457786b49474a6c494445784e5334334d6934694c434a70626d526c654349364d437769644739305957785064585277645852555a586830564739725a57354462335675644349364d7a5573496d4e76625842735a585270623235535a57467a623234694f694a475355354a553067694c434a70626e42316446526c654852556232746c626b4e7664573530496a6f784d69776959573168656d39754c574a6c5a484a76593273746157353262324e6864476c76626b316c64484a7059334d694f6e736961573577645852556232746c626b4e7664573530496a6f784d6977696233563063485630564739725a57354462335675644349364d7a5573496d6c75646d396a595852706232354d5958526c626d4e35496a6f794d7a4d354c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f794d7a4d356658303d227d358ac004" + ], + ], + "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? Assistant:": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "a645548f-0b3a-47ce-a675-f51e6e9037de", + }, + 200, + [ + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694945686c636d55694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d71ffbf2d", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494746795a534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d9f82f061", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694948526f5a534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dee0662df", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949484e305a58427a4969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d76bf1639", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494852764969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227daf097af1", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949474e76626e5a6c636e51694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d5955803a", + 
"000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494449784d694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dfa89690e", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694947526c5a334a6c5a584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dbe5287e4", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494559694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d8732a806", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69595768795a57356f5a576c304969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d066744eb", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494852764969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227daf097af1", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454e6c62484e7064584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227df62aca9e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694f694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227de96ff0b6", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6958473563626a45694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227df6e5e085", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694b534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d82afca0e", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694946526f5a534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d73834b92", + 
"000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949475a76636d3131624745694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d3ad98743", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494852764969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227daf097af1", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949474e76626e5a6c636e51694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d5955803a", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949474a6c6448646c5a5734694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227deb6a0bd6", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494559694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d8732a806", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69595768795a57356f5a576c304969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d066744eb", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494746755a434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d7e666d0f", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454e6c62484e7064584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227df62aca9e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949476c7a4969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d9a64e3c4", + 
"000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694f694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227de96ff0b6", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f695847346749434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d4292c7bb", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d1c09da34", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494430694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227db45e8aa5", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494367694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d1f5f0f41", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6952694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dc48bec13", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494330694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dae344b5e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949444d794969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d8d3ee747", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694b534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d82afca0e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949436f694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d29a16fe0", + 
"000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494455694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dc85354c4", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694c794973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d26f20099", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694f534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dfff8a709", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6958473563626a49694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d36dc3354", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694b534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d82afca0e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494642734969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227de1cc18f5", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69645763694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d7e451c81", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949476c754969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227da2508214", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494449784d694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dfa89690e", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694947526c5a334a6c5a584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dbe5287e4", + 
"000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494559694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d8732a806", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69595768795a57356f5a576c304969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d066744eb", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949475a7663694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dd72b242b", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494559694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d8732a806", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694f694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227de96ff0b6", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f695847346749434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d4292c7bb", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d1c09da34", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494430694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227db45e8aa5", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494367694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d1f5f0f41", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694d6a45794969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d184cd7ac", + 
"000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494330694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dae344b5e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949444d794969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d8d3ee747", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694b534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d82afca0e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949436f694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d29a16fe0", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494455694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dc85354c4", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694c794973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d26f20099", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694f534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dfff8a709", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f695847346749434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d4292c7bb", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d1c09da34", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494430694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227db45e8aa5", + 
"000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494445344d434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227ddeedbeac", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949436f694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d29a16fe0", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494455694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dc85354c4", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694c794973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d26f20099", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694f534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dfff8a709", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f695847346749434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d4292c7bb", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d1c09da34", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494430694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227db45e8aa5", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494445774d434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d0c0fabb4", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6958473563626a4d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dc01b8024", + 
"000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694b534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d82afca0e", + "000000b70000004b26cb93750b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694946526f5a584a6c5a6d39795a534973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d8f97117a", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694c434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d30655726", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494449784d694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dfa89690e", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694947526c5a334a6c5a584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dbe5287e4", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494559694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227d8732a806", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69595768795a57356f5a576c304969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d066744eb", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949476c7a4969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d9a64e3c4", + "000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949475678645746734969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227d08092f6c", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494852764969776963335276634639795a57467a623234694f6d353162477773496e4e30623341694f6d353162477839227daf097af1", + 
"000000af0000004b765b4f360b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f69494445774d434973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227d0c0fabb4", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694947526c5a334a6c5a584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227dbe5287e4", + "000000b30000004bd34b35b50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f6949454e6c62484e7064584d694c434a7a6447397758334a6c59584e7662694936626e5673624377696333527663434936626e56736248303d227df62aca9e", + "000000ab0000004b83dbe9f60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694c694973496e4e3062334266636d566863323975496a7075645778734c434a7a64473977496a70756457787366513d3d227dbe4ddce4", + "0000016b0000004ba192d2880b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694969776963335276634639795a57467a623234694f694a7a6447397758334e6c6358566c626d4e6c496977696333527663434936496c78755847354964573168626a6f694c434a686257463662323474596d566b636d396a61793170626e5a76593246306157397554575630636d6c6a6379493665794a70626e4231644652766132567551323931626e51694f6a45354c434a7664585277645852556232746c626b4e7664573530496a6f354f5377696157353262324e6864476c76626b78686447567559336b694f6a45314e7a4173496d5a70636e4e30516e6c305a5578686447567559336b694f6a51784d583139227d9a4fc171", + ], + ], + "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "4f8ab6c5-42d1-4e35-9573-30f9f41f821e", + }, + 200, + [ + 
"000003f70000004bf8acb9920b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d46306157397563794936573373695a6d6c7561584e6f58334a6c59584e7662694936496b4e505456424d5256524649697769615751694f694a695a5463794e475a6d5953316c5a6a63344c54517a4d444974595464684e7930794e6a4a6a4e4463784f54566a4e5755694c434a305a586830496a6f694946527649474e76626e5a6c636e5167526d466f636d56756147567064434230627942445a57787a6158567a4c43423562335567593246754948567a5a534230614755675a6d39796258567359547063626c78755132567363326c316379413949436847595768795a57356f5a576c30494330674d7a497049436f674e53383558473563626b6c754948526f61584d675932467a5a537767615759676557393149476868646d55674d6a45794947526c5a334a6c5a584d67526d466f636d567561475670644377676557393149474e686269423163325567644768706379426d62334a74645778684948527649474e6862474e31624746305a534230614755675a58463161585a6862475675644342305a5731775a584a68644856795a534270626942445a57787a6158567a4f6c7875584735445a57787a6158567a494430674b4449784d69417449444d794b534171494455764f534139494445774d434171494455764f5341394944557758473563626c526f5a584a6c5a6d39795a5377674d6a45794947526c5a334a6c5a584d67526d466f636d567561475670644342706379426c63585670646d46735a57353049485276494455774947526c5a334a6c5a584d675132567363326c316379346966563073496d6c6b496a6f694e47593459574932597a55744e444a6b4d5330305a544d314c546b314e7a4d744d7a426d4f5759304d5759344d6a466c4969776963484a7662584230496a6f695632686864434270637941794d5449675a47566e636d566c63794247595768795a57356f5a576c3049474e76626e5a6c636e526c5a434230627942445a57787a6158567a50794973496d467459587076626931695a57527962324e724c576c75646d396a595852706232354e5a58527961574e7a496a7037496d6c7563485630564739725a57354462335675644349364f5377696233563063485630564739725a57354462335675644349364f544573496d6c75646d396a595852706232354d5958526c626d4e35496a6f794f5463794c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f794f5463796658303d227deba065e0" + ], + ], + "meta.llama2-13b-chat-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "6dd99878-0919-4f92-850c-48f50f923b76", + }, + 200, + [ + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a62335675644349364d546373496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4573496e4e3062334266636d566863323975496a70756457787366513d3d227d8ad5573b", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694946646f595851694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4973496e4e3062334266636d566863323975496a70756457787366513d3d227dc79406b1", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949476c7a4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f7a4c434a7a6447397758334a6c59584e7662694936626e56736248303d227d03c98d5f", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694948526f5a534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e43776963335276634639795a57467a623234694f6d353162477839227d87f1a596", + "000000ef0000004b2ea8177f0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949474e76626e5a6c636e4e70623234694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a5573496e4e3062334266636d566863323975496a70756457787366513d3d227d10bef8bd", + "000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949475a76636d3131624745694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a5973496e4e3062334266636d566863323975496a70756457787366513d3d227d587688a5", + "000000df0000004b8f89aff90b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6950794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e79776963335276634639795a57467a623234694f6d353162477839227d2a55ad0a", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a6773496e4e3062334266636d566863323975496a70756457787366513d3d227d6967bb80", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a6b73496e4e3062334266636d566863323975496a70756457787366513d3d227dfbf995fe", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d544173496e4e3062334266636d566863323975496a70756457787366513d3d227d2d794c92", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d544573496e4e3062334266636d566863323975496a70756457787366513d3d227d28b94ab1", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d544973496e4e3062334266636d566863323975496a70756457787366513d3d227dce6d78c6", + "000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947526c5a334a6c5a584d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a457a4c434a7a6447397758334a6c59584e7662694936626e56736248303d227d4f48e7b9", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494559694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a45304c434a7a6447397758334a6c59584e7662694936626e56736248303d227d0992a7e0", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69595768795a5734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a45314c434a7a6447397758334a6c59584e7662694936626e56736248303d227ddcb2ffa3", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f696147567064434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d545973496e4e3062334266636d566863323975496a70756457787366513d3d227d53be207a", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949476c7a4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f784e79776963335276634639795a57467a623234694f6d353162477839227d14ca1e11", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949475678645746734969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f784f43776963335276634639795a57467a623234694f6d353162477839227de610d9c7", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f784f53776963335276634639795a57467a623234694f6d353162477839227d0d870a4e", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4173496e4e3062334266636d566863323975496a70756457787366513d3d227dad6d87d8", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4573496e4e3062334266636d566863323975496a70756457787366513d3d227d435fabd5", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4973496e4e3062334266636d566863323975496a70756457787366513d3d227d57d9bb71", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a4d73496e4e3062334266636d566863323975496a70756457787366513d3d227d2653a15b", + 
"000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947526c5a334a6c5a584d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49304c434a7a6447397758334a6c59584e7662694936626e56736248303d227db83e662f", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949454e6c62434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a5573496e4e3062334266636d566863323975496a70756457787366513d3d227d49763324", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6963326b694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49324c434a7a6447397758334a6c59584e7662694936626e56736248303d227d57ef43c7", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6964584d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49334c434a7a6447397758334a6c59584e7662694936626e56736248303d227d31fa2ded", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d6a6773496e4e3062334266636d566863323975496a70756457787366513d3d227d45d624b4", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a49354c434a7a6447397758334a6c59584e7662694936626e56736248303d227d50170c09", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d774c434a7a6447397758334a6c59584e7662694936626e56736248303d227d334aff43", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f695647686c4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f7a4d53776963335276634639795a57467a623234694f6d353162477839227da3b5dcb6", + "000000ef0000004b2ea8177f0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949474e76626e5a6c636e4e70623234694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d794c434a7a6447397758334a6c59584e7662694936626e56736248303d227d04d2363e", + "000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949475a76636d3131624745694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d7a4c434a7a6447397758334a6c59584e7662694936626e56736248303d227dcdac512b", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949476c7a4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f7a4e43776963335276634639795a57467a623234694f6d353162477839227d31fe2917", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694f694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d7a5573496e4e3062334266636d566863323975496a70756457787366513d3d227d12ef5dd9", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d324c434a7a6447397758334a6c59584e7662694936626e56736248303d227d092a8c14", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d334c434a7a6447397758334a6c59584e7662694936626e56736248303d227d07cd64c5", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69777241694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a4d344c434a7a6447397758334a6c59584e7662694936626e56736248303d227d3ac46cfa", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6951794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364d7a6b73496e4e3062334266636d566863323975496a70756457787366513d3d227db44f4efe", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494430694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a51774c434a7a6447397758334a6c59584e7662694936626e56736248303d227d20eaba75", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494367694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a51784c434a7a6447397758334a6c59584e7662694936626e56736248303d227d7e72f783", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69777241694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a51794c434a7a6447397758334a6c59584e7662694936626e56736248303d227df85c19b2", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6952694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e444d73496e4e3062334266636d566863323975496a70756457787366513d3d227dd6166c06", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494330694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a51304c434a7a6447397758334a6c59584e7662694936626e56736248303d227d73d3f258", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e445573496e4e3062334266636d566863323975496a70756457787366513d3d227d1e15e297", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e445973496e4e3062334266636d566863323975496a70756457787366513d3d227de161d81d", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e446373496e4e3062334266636d566863323975496a70756457787366513d3d227d9b436cbd", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694b534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e446773496e4e3062334266636d566863323975496a70756457787366513d3d227d664be53d", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494d4f584969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f304f53776963335276634639795a57467a623234694f6d353162477839227d437bffb5", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e544173496e4e3062334266636d566863323975496a70756457787366513d3d227dab0d95e6", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694e534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e544573496e4e3062334266636d566863323975496a70756457787366513d3d227dd4e37d1a", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e544973496e4e3062334266636d566863323975496a70756457787366513d3d227d5547f6e9", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694f534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e544d73496e4e3062334266636d566863323975496a70756457787366513d3d227d47430ae1", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a55304c434a7a6447397758334a6c59584e7662694936626e56736248303d227deaa8070b", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a55314c434a7a6447397758334a6c59584e7662694936626e56736248303d227de44fefda", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69553238694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a55324c434a7a6447397758334a6c59584e7662694936626e56736248303d227dcc1d90ca", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e546373496e4e3062334266636d566863323975496a70756457787366513d3d227dc06e0e73", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f314f43776963335276634639795a57467a623234694f6d353162477839227dfd8f11d0", + 
"000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949474e76626e5a6c636e51694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a55354c434a7a6447397758334a6c59584e7662694936626e56736248303d227d9fe07832", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a4173496e4e3062334266636d566863323975496a70756457787366513d3d227dc0eb7482", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a4573496e4e3062334266636d566863323975496a70756457787366513d3d227d5a934486", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a4973496e4e3062334266636d566863323975496a70756457787366513d3d227dbc4776f1", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a4d73496e4e3062334266636d566863323975496a70756457787366513d3d227db98770d2", + "000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694947526c5a334a6c5a584d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a59304c434a7a6447397758334a6c59584e7662694936626e56736248303d227da56708c6", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494559694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a59314c434a7a6447397758334a6c59584e7662694936626e56736248303d227d88b20fa6", + 
"000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69595768795a5734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a59324c434a7a6447397758334a6c59584e7662694936626e56736248303d227d405d8647", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f696147567064434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a6373496e4e3062334266636d566863323975496a70756457787366513d3d227da9e4b83e", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494852764969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f324f43776963335276634639795a57467a623234694f6d353162477839227deea728a3", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949454e6c62434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e6a6b73496e4e3062334266636d566863323975496a70756457787366513d3d227da940502e", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6963326b694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63774c434a7a6447397758334a6c59584e7662694936626e56736248303d227d8cecd403", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6964584d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63784c434a7a6447397758334a6c59584e7662694936626e56736248303d227dbb968e47", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a4973496e4e3062334266636d566863323975496a70756457787366513d3d227d93200836", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694948646c4969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f334d79776963335276634639795a57467a623234694f6d353162477839227dd66d2ee7", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949474e6862694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a5173496e4e3062334266636d566863323975496a70756457787366513d3d227dec7f3121", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694948567a5a534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a5573496e4e3062334266636d566863323975496a70756457787366513d3d227dbb1f0174", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694948526f5a534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364e7a5973496e4e3062334266636d566863323975496a70756457787366513d3d227db9f2b3dd", + "000000eb0000004bdb28b1bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949475a76636d3131624745694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63334c434a7a6447397758334a6c59584e7662694936626e56736248303d227d2b853909", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949477870613255694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63344c434a7a6447397758334a6c59584e7662694936626e56736248303d227d7e216de9", + "000000e70000004b1ed85cbe0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694948526f61584d694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a63354c434a7a6447397758334a6c59584e7662694936626e56736248303d227db006b745", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694f694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f444173496e4e3062334266636d566863323975496a70756457787366513d3d227d5cde6a12", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a67784c434a7a6447397758334a6c59584e7662694936626e56736248303d227d8da1c76f", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a67794c434a7a6447397758334a6c59584e7662694936626e56736248303d227d83462fbe", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69777241694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a677a4c434a7a6447397758334a6c59584e7662694936626e56736248303d227d85d084c5", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6951794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f445173496e4e3062334266636d566863323975496a70756457787366513d3d227d6a97878c", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494430694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a67314c434a7a6447397758334a6c59584e7662694936626e56736248303d227d67065455", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494367694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a67324c434a7a6447397758334a6c59584e7662694936626e56736248303d227d753efc6f", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f446373496e4e3062334266636d566863323975496a70756457787366513d3d227dbfc13d8b", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f446773496e4e3062334266636d566863323975496a70756457787366513d3d227dba013ba8", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f446b73496e4e3062334266636d566863323975496a70756457787366513d3d227d5cd509df", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494330694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a6b774c434a7a6447397758334a6c59584e7662694936626e56736248303d227da846dad7", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f544573496e4e3062334266636d566863323975496a70756457787366513d3d227dfe05defa", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f544973496e4e3062334266636d566863323975496a70756457787366513d3d227d0171e470", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694d694973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f544d73496e4e3062334266636d566863323975496a70756457787366513d3d227df6e3c080", + 
"000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694b534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f545173496e4e3062334266636d566863323975496a70756457787366513d3d227df5a613bd", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69494d4f584969776963484a7662584230583352766132567558324e7664573530496a7075645778734c434a6e5a57356c636d463061573975583352766132567558324e7664573530496a6f354e53776963335276634639795a57467a623234694f6d353162477839227d4a635728", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f6949434973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f545973496e4e3062334266636d566863323975496a70756457787366513d3d227d71c29e6d", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694e534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f546373496e4e3062334266636d566863323975496a70756457787366513d3d227d839ce6c1", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694c794973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f546773496e4e3062334266636d566863323975496a70756457787366513d3d227de12c5966", + "000000e30000004beb58fa7e0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f694f534973496e427962323177644639306232746c626c396a6233567564434936626e5673624377695a3256755a584a6864476c76626c39306232746c626c396a62335675644349364f546b73496e4e3062334266636d566863323975496a70756457787366513d3d227d103c913a", + 
"000001970000004b2c566fd40b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6e5a57356c636d463061573975496a6f69584734694c434a77636d397463485266644739725a57356659323931626e51694f6d353162477773496d646c626d56795958527062323566644739725a57356659323931626e51694f6a45774d43776963335276634639795a57467a623234694f694a735a57356e644767694c434a686257463662323474596d566b636d396a61793170626e5a76593246306157397554575630636d6c6a6379493665794a70626e4231644652766132567551323931626e51694f6a45334c434a7664585277645852556232746c626b4e7664573530496a6f784d444173496d6c75646d396a595852706232354d5958526c626d4e35496a6f794e7a59304c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f7a4d7a683966513d3d227d7f984a9b", + ], + ], + "amazon.titan-text-express-v1::Malformed Streaming Chunk": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + }, + 200, + [ + # Payload is intentionally damaged to throw an exception + "00004bdae582ec0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a7664585277645852555a586830496a6f69584734784947526c5a334a6c5a534247595768795a57356f5a576c3049476c7a494441754e5459675a47566e636d566c637942445a57787a6158567a4c694255614756795a575a76636d5573494449784d69426b5a5764795a575567526d466f636d56756147567064434270626942445a57787a6158567a494864766457786b49474a6c494445784e5334334d6934694c434a70626d526c654349364d437769644739305957785064585277645852555a586830564739725a57354462335675644349364d7a5573496d4e76625842735a585270623235535a57467a623234694f694a475355354a553067694c434a70626e42316446526c654852556232746c626b4e7664573530496a6f784d69776959573168656d39754c574a6c5a484a76593273746157353262324e6864476c76626b316c64484a7059334d694f6e736961573577645852556232746c626b4e7664573530496a6f784d6977696233563063485630564739725a57354462335675644349364d7a5573496d6c75646d396a595852706232354d5958526c626d4e35496a6f794d7a4d354c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f794d7a4d356658303d227d358ac004" + ], + ], + "amazon.titan-text-express-v1::Malformed Streaming Body": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + "x-amzn-errortype": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", + }, + 200, + [ + # Computes an example payload for an error inside a stream, from human readable format to a hex string. + encode_streaming_payload( + {"event-type": "chunk", "content-type": "application/json"}, + { + "outputText": "ValidationException", + "index": 0, + "totalOutputTextTokenCount": 35, + "completionReason": "FINISH", + "inputTextTokenCount": 12, + "amazon-bedrock-invocationMetrics": { + "inputTokenCount": 12, + "outputTokenCount": 35, + "invocationLatency": 2339, + "firstByteLatency": 2339, + }, + }, + malformed_body=True, + ).hex() + ], + ], + "amazon.titan-text-express-v1::Streaming Exception": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + "x-amzn-errortype": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", + }, + 200, + [ + # Computes an example payload for an error inside a stream, from human readable format to a hex string. 
+ encode_streaming_payload( + { + "event-type": "chunk", + "content-type": "application/json", + "message-type": "error", + "exception-type": "ValidationException", + "error-code": "ValidationException", + "error-message": "Malformed input request, please reformat your input and try again.", + }, + { + "outputText": "ValidationException", + "index": 0, + "totalOutputTextTokenCount": 35, + "completionReason": "FINISH", + "inputTextTokenCount": 12, + "amazon-bedrock-invocationMetrics": { + "inputTokenCount": 12, + "outputTokenCount": 35, + "invocationLatency": 2339, + "firstByteLatency": 2339, + }, + }, + ).hex() + ], + ], +} + +RESPONSES = { + "amazon.titan-text-express-v1::User: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "884db5c9-18ab-4f27-8892-33656176a2e6"}, + 200, + { + "inputTextTokenCount": 72, + "results": [ + {"tokenCount": 23, "outputText": " Hello, how can I help you today?", "completionReason": "FINISH"} + ], + }, + ], + "anthropic.claude-instant-v1::The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "1a72a1f6-310f-469c-af1d-2c59eb600089"}, + 200, + { + "id": "compl_01EGAoDn3azRGBGFLADWEzn7", + "type": "message", + "role": "assistant", + "content": [{"type": "text", "text": "Hello! It's nice to meet you."}], + "model": "claude-instant-1.2", + "stop_reason": "end_turn", + "stop_sequence": None, + "usage": {"input_tokens": 73, "output_tokens": 13}, + }, + ], + "meta.llama2-13b-chat-v1::[INST] The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "cce6b34c-812c-4f97-8885-515829aa9639"}, + 200, + { + "generation": " Hello! It's great to talk to you! I'm an AI, and I'm here to help answer any questions you might have. What's on your mind? \ud83e\udd14 Do you have a specific topic you'd like to discuss, or is there something you'd like to know? \ud83e\udd13 I'm all ears! 
\ud83d\udc42", + "prompt_token_count": 76, + "generation_token_count": 86, + "stop_reason": "stop", + }, + ], + "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "228ee63f-4eca-4b7d-b679-bc920de63525"}, + 200, + { + "id": 1234, + "prompt": { + "text": "What is 212 degrees Fahrenheit converted to Celsius?", + "tokens": [ + { + "generatedToken": { + "token": "\u2581What\u2581is", + "logprob": -7.703575134277344, + "raw_logprob": -7.703575134277344, + }, + "topTokens": None, + "textRange": {"start": 0, "end": 7}, + }, + { + "generatedToken": { + "token": "\u2581", + "logprob": -3.4491159915924072, + "raw_logprob": -3.4491159915924072, + }, + "topTokens": None, + "textRange": {"start": 7, "end": 8}, + }, + { + "generatedToken": { + "token": "212", + "logprob": -9.432294845581055, + "raw_logprob": -9.432294845581055, + }, + "topTokens": None, + "textRange": {"start": 8, "end": 11}, + }, + { + "generatedToken": { + "token": "\u2581degrees\u2581Fahrenheit", + "logprob": -9.64009952545166, + "raw_logprob": -9.64009952545166, + }, + "topTokens": None, + "textRange": {"start": 11, "end": 30}, + }, + { + "generatedToken": { + "token": "\u2581converted\u2581to", + "logprob": -8.4347505569458, + "raw_logprob": -8.4347505569458, + }, + "topTokens": None, + "textRange": {"start": 30, "end": 43}, + }, + { + "generatedToken": { + "token": "\u2581Celsius", + "logprob": -0.17065171897411346, + "raw_logprob": -0.17065171897411346, + }, + "topTokens": None, + "textRange": {"start": 43, "end": 51}, + }, + { + "generatedToken": { + "token": "?", + "logprob": -8.000349998474121, + "raw_logprob": -8.000349998474121, + }, + "topTokens": None, + "textRange": {"start": 51, "end": 52}, + }, + ], + }, + "completions": [ + { + "data": { + "text": "\n212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "tokens": [ + { + "generatedToken": { + "token": "<|newline|>", + "logprob": 0.0, + "raw_logprob": -1.0609570381348021e-05, + }, + "topTokens": None, + "textRange": {"start": 0, "end": 1}, + }, + { + "generatedToken": { + "token": "\u2581", + "logprob": -0.0012434140080586076, + "raw_logprob": -0.017860885709524155, + }, + "topTokens": None, + "textRange": {"start": 1, "end": 1}, + }, + { + "generatedToken": { + "token": "212", + "logprob": -0.047134462743997574, + "raw_logprob": -0.12055955827236176, + }, + "topTokens": None, + "textRange": {"start": 1, "end": 4}, + }, + { + "generatedToken": { + "token": "\u2581degrees\u2581Fahrenheit", + "logprob": -0.006817296147346497, + "raw_logprob": -0.052842844277620316, + }, + "topTokens": None, + "textRange": {"start": 4, "end": 23}, + }, + { + "generatedToken": { + "token": "\u2581is\u2581equal\u2581to", + "logprob": -0.008958976715803146, + "raw_logprob": -0.0576501227915287, + }, + "topTokens": None, + "textRange": {"start": 23, "end": 35}, + }, + { + "generatedToken": { + "token": "\u2581", + "logprob": -4.756337511935271e-05, + "raw_logprob": -0.002072569215670228, + }, + "topTokens": None, + "textRange": {"start": 35, "end": 36}, + }, + { + "generatedToken": { + "token": "100", + "logprob": -1.6689286894688848e-06, + "raw_logprob": -0.00014327930693980306, + }, + "topTokens": None, + "textRange": {"start": 36, "end": 39}, + }, + { + "generatedToken": { + "token": "\u2581degrees\u2581Celsius", + "logprob": -0.0009832315845414996, + "raw_logprob": -0.009537134319543839, + }, + "topTokens": None, + "textRange": {"start": 39, "end": 55}, + }, + { + "generatedToken": { + "token": ".", + 
"logprob": -0.009822133928537369, + "raw_logprob": -0.04313727468252182, + }, + "topTokens": None, + "textRange": {"start": 55, "end": 56}, + }, + { + "generatedToken": { + "token": "<|endoftext|>", + "logprob": -0.10973381996154785, + "raw_logprob": -0.2600202262401581, + }, + "topTokens": None, + "textRange": {"start": 56, "end": 56}, + }, + ], + }, + "finishReason": {"reason": "endoftext"}, + } + ], + }, + ], + "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "81508a1c-33a8-4294-8743-f0c629af2f49"}, + 200, + { + "inputTextTokenCount": 12, + "results": [ + { + "tokenCount": 32, + "outputText": "\n1 degree Fahrenheit is 0.56 Celsius. Therefore, 212 degree Fahrenheit in Celsius would be 115.42.", + "completionReason": "FINISH", + } + ], + }, + ], + "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? Assistant:": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "6a886158-b39f-46ce-b214-97458ab76f2f"}, + 200, + { + "completion": " Okay, here are the steps to convert 212 degrees Fahrenheit to Celsius:\n\n1) The formula to convert between Fahrenheit and Celsius is:\n C = (F - 32) * 5/9\n\n2) Plug in 212 degrees Fahrenheit for F:\n C = (212 - 32) * 5/9\n C = 180 * 5/9\n C = 100\n\n3) Therefore, 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", + "stop_reason": "max_tokens", + "stop": None, + }, + ], + "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "12912a17-aa13-45f3-914c-cc82166f3601"}, + 200, + { + "generations": [ + { + "finish_reason": "MAX_TOKENS", + "id": "deafebbd-7cdb-461f-8d6a-846602141f8f", + "text": " To convert from Fahrenheit to Celsius, you can use the following formula:\n\nCelsius = (Fahrenheit - 32) * 5/9\n\nIn this case, 212 degrees Fahrenheit is converted to Celsius as follows:\n\nCelsius = (212 - 32) * 5/9 = (180) * 5/9 = (180/9) = 20 degrees Celsius\n\nTherefore, 212 degrees Fahrenheit is equivalent to 20 degrees Celsius.\n\nIt's important to note that", + } + ], + "id": "12912a17-aa13-45f3-914c-cc82166f3601", + "prompt": "What is 212 degrees Fahrenheit converted to Celsius?", + }, + ], + "meta.llama2-13b-chat-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "a168214d-742d-4244-bd7f-62214ffa07df"}, + 200, + { + "generation": "\n\n212\u00b0F = ?\u00b0C\n\nPlease help! I'm stuck!\n\nThank you!\n\nI hope this is the correct place to ask this question. 
Please let me know if it isn't.\n\nI appreciate your help!\n\nBest regards,\n\n[Your Name]", + "prompt_token_count": 17, + "generation_token_count": 69, + "stop_reason": "stop", + }, + ], + "amazon.titan-embed-g1-text-02::This is an embedding test.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "b10ac895-eae3-4f07-b926-10b2866c55ed"}, + 200, + { + "embedding": [ + -0.14160156, + 0.034423828, + 0.54296875, + 0.10986328, + 0.053466797, + 0.3515625, + 0.12988281, + -0.0002708435, + -0.21484375, + 0.060302734, + 0.58984375, + -0.5859375, + 0.52734375, + 0.82421875, + -0.91015625, + -0.19628906, + 0.45703125, + 0.609375, + -0.67578125, + 0.39453125, + -0.46875, + -0.25390625, + -0.21191406, + 0.114746094, + 0.31640625, + -0.41015625, + -0.32617188, + -0.43554688, + 0.4765625, + -0.4921875, + 0.40429688, + 0.06542969, + 0.859375, + -0.008056641, + -0.19921875, + 0.072753906, + 0.33203125, + 0.69921875, + 0.39453125, + 0.15527344, + 0.08886719, + -0.25, + 0.859375, + 0.22949219, + -0.19042969, + 0.13769531, + -0.078125, + 0.41210938, + 0.875, + 0.5234375, + 0.59765625, + -0.22949219, + -0.22558594, + -0.47460938, + 0.37695312, + 0.51953125, + -0.5703125, + 0.46679688, + 0.43554688, + 0.17480469, + -0.080566406, + -0.16699219, + -0.734375, + -1.0625, + -0.33984375, + 0.390625, + -0.18847656, + -0.5234375, + -0.48828125, + 0.44921875, + -0.09814453, + -0.3359375, + 0.087402344, + 0.36914062, + 1.3203125, + 0.25585938, + 0.14746094, + -0.059570312, + -0.15820312, + -0.037353516, + -0.61328125, + -0.6484375, + -0.35351562, + 0.55078125, + -0.26953125, + 0.90234375, + 0.3671875, + 0.31054688, + 0.00014019012, + -0.171875, + 0.025512695, + 0.5078125, + 0.11621094, + 0.33203125, + 0.8125, + -0.3046875, + -1.078125, + -0.5703125, + 0.26171875, + -0.4609375, + 0.203125, + 0.44726562, + -0.5078125, + 0.41601562, + -0.1953125, + 0.028930664, + -0.57421875, + 0.2265625, + 0.13574219, + -0.040039062, + -0.22949219, + -0.515625, + -0.19042969, + -0.30078125, + 0.10058594, + -0.66796875, + 0.6015625, + 0.296875, + -0.765625, + -0.87109375, + 0.2265625, + 0.068847656, + -0.088378906, + -0.1328125, + -0.796875, + -0.37304688, + 0.47460938, + -0.3515625, + -0.8125, + -0.32226562, + 0.265625, + 0.3203125, + -0.4140625, + -0.49023438, + 0.859375, + -0.19140625, + -0.6328125, + 0.10546875, + -0.5625, + 0.66015625, + 0.26171875, + -0.2109375, + 0.421875, + -0.82421875, + 0.29296875, + 0.17773438, + 0.24023438, + 0.5078125, + -0.49804688, + -0.10205078, + 0.10498047, + -0.36132812, + -0.47460938, + -0.20996094, + 0.010070801, + -0.546875, + 0.66796875, + -0.123046875, + -0.75390625, + 0.19628906, + 0.17480469, + 0.18261719, + -0.96875, + -0.26171875, + 0.4921875, + -0.40039062, + 0.296875, + 0.1640625, + -0.20507812, + -0.36132812, + 0.76171875, + -1.234375, + -0.625, + 0.060058594, + -0.09375, + -0.14746094, + 1.09375, + 0.057861328, + 0.22460938, + -0.703125, + 0.07470703, + 0.23828125, + -0.083984375, + -0.54296875, + 0.5546875, + -0.5, + -0.390625, + 0.106933594, + 0.6640625, + 0.27734375, + -0.953125, + 0.35351562, + -0.7734375, + -0.77734375, + 0.16503906, + -0.42382812, + 0.36914062, + 0.020141602, + -1.3515625, + 0.18847656, + 0.13476562, + -0.034179688, + -0.03930664, + -0.03857422, + -0.027954102, + 0.73828125, + -0.18945312, + -0.09814453, + -0.46289062, + 0.36914062, + 0.033203125, + 0.020874023, + -0.703125, + 0.91796875, + 0.38671875, + 0.625, + -0.19335938, + -0.16796875, + -0.58203125, + 0.21386719, + -0.032470703, + -0.296875, + -0.15625, + -0.1640625, + -0.74609375, + 0.328125, + 
0.5546875, + -0.1953125, + 1.0546875, + 0.171875, + -0.099609375, + 0.5234375, + 0.05078125, + -0.35742188, + -0.2734375, + -1.3203125, + -0.8515625, + -0.16015625, + 0.01574707, + 0.29296875, + 0.18457031, + -0.265625, + 0.048339844, + 0.045654297, + -0.32226562, + 0.087890625, + -0.0047302246, + 0.38671875, + 0.10644531, + -0.06225586, + 1.03125, + 0.94140625, + -0.3203125, + 0.20800781, + -1.171875, + 0.48046875, + -0.091796875, + 0.20800781, + -0.1328125, + -0.20507812, + 0.28125, + -0.47070312, + -0.09033203, + 0.0013809204, + -0.08203125, + 0.43359375, + -0.03100586, + -0.060791016, + -0.53515625, + -1.46875, + 0.000101566315, + 0.515625, + 0.40625, + -0.10498047, + -0.15820312, + -0.009460449, + -0.77734375, + -0.5859375, + 0.9765625, + 0.099609375, + 0.51953125, + 0.38085938, + -0.09667969, + -0.100097656, + -0.5, + -1.3125, + -0.18066406, + -0.099121094, + 0.26171875, + -0.14453125, + -0.546875, + 0.17578125, + 0.484375, + 0.765625, + 0.45703125, + 0.2734375, + 0.0028076172, + 0.17089844, + -0.32421875, + -0.37695312, + 0.30664062, + -0.48046875, + 0.07128906, + 0.031982422, + -0.31054688, + -0.055419922, + -0.29296875, + 0.3359375, + -0.296875, + 0.47851562, + -0.05126953, + 0.18457031, + -0.01953125, + -0.35742188, + 0.017944336, + -0.25, + 0.10595703, + 0.17382812, + -0.73828125, + 0.36914062, + -0.15234375, + -0.8125, + 0.17382812, + 0.048095703, + 0.5625, + -0.33789062, + 0.023071289, + -0.21972656, + 0.16015625, + 0.032958984, + -1.1171875, + -0.984375, + 0.83984375, + 0.009033203, + -0.042236328, + -0.46484375, + -0.08203125, + 0.44726562, + -0.765625, + -0.3984375, + -0.40820312, + -0.234375, + 0.044189453, + 0.119628906, + -0.7578125, + -0.55078125, + -0.4453125, + 0.7578125, + 0.34960938, + 0.96484375, + 0.35742188, + 0.36914062, + -0.35351562, + -0.36132812, + 1.109375, + 0.5859375, + 0.85546875, + -0.10644531, + -0.6953125, + -0.0066833496, + 0.042236328, + -0.06689453, + 0.36914062, + 0.9765625, + -0.3046875, + 0.59765625, + -0.6640625, + 0.21484375, + -0.07128906, + 1.1328125, + -0.51953125, + 0.86328125, + -0.11328125, + 0.15722656, + -0.36328125, + -0.04638672, + 1.4375, + 0.18457031, + -0.18359375, + 0.10595703, + -0.49023438, + -0.07324219, + -0.73046875, + -0.119140625, + 0.021118164, + 0.4921875, + -0.46875, + 0.28710938, + 0.3359375, + 0.11767578, + -0.2109375, + -0.14550781, + 0.39648438, + -0.27734375, + 0.48046875, + 0.12988281, + 0.45507812, + -0.375, + -0.84765625, + 0.25585938, + -0.36523438, + 0.8046875, + 0.42382812, + -0.24511719, + 0.54296875, + 0.71875, + 0.010009766, + -0.04296875, + 0.083984375, + -0.52734375, + 0.13964844, + -0.27539062, + -0.30273438, + 1.1484375, + -0.515625, + -0.19335938, + 0.58984375, + 0.049072266, + 0.703125, + -0.04272461, + 0.5078125, + 0.34960938, + -0.3359375, + -0.47460938, + 0.049316406, + 0.36523438, + 0.7578125, + -0.022827148, + -0.71484375, + 0.21972656, + 0.09716797, + -0.203125, + -0.36914062, + 1.34375, + 0.34179688, + 0.46679688, + 1.078125, + 0.26171875, + 0.41992188, + 0.22363281, + -0.515625, + -0.5703125, + 0.13378906, + 0.26757812, + -0.22558594, + -0.5234375, + 0.06689453, + 0.08251953, + -0.625, + 0.16796875, + 0.43164062, + -0.55859375, + 0.28125, + 0.078125, + 0.6328125, + 0.23242188, + -0.064941406, + -0.004486084, + -0.20703125, + 0.2734375, + 0.453125, + -0.734375, + 0.04272461, + 0.36132812, + -0.19628906, + -0.12402344, + 1.3515625, + 0.25585938, + 0.4921875, + -0.29296875, + -0.58984375, + 0.021240234, + -0.044677734, + 0.7578125, + -0.7890625, + 0.10253906, + -0.15820312, + -0.5078125, + 
-0.39453125, + -0.453125, + 0.35742188, + 0.921875, + 0.44335938, + -0.49804688, + 0.44335938, + 0.31445312, + 0.58984375, + -1.0078125, + -0.22460938, + 0.24121094, + 0.87890625, + 0.66015625, + -0.390625, + -0.05053711, + 0.059570312, + 0.36132812, + -0.00038719177, + -0.017089844, + 0.62890625, + 0.203125, + 0.17480469, + 0.025512695, + 0.47460938, + 0.3125, + 1.140625, + 0.32421875, + -0.057861328, + 0.36914062, + -0.7265625, + -0.51953125, + 0.26953125, + 0.42773438, + 0.064453125, + 0.6328125, + 0.27148438, + -0.11767578, + 0.66796875, + -0.38671875, + 0.5234375, + -0.59375, + 0.5078125, + 0.008239746, + -0.34179688, + -0.27539062, + 0.5234375, + 1.296875, + 0.29492188, + -0.010986328, + -0.41210938, + 0.59375, + 0.061767578, + -0.33398438, + -2.03125, + 0.87890625, + -0.010620117, + 0.53125, + 0.14257812, + -0.515625, + -1.03125, + 0.578125, + 0.1875, + 0.44335938, + -0.33203125, + -0.36328125, + -0.3203125, + 0.29296875, + -0.8203125, + 0.41015625, + -0.48242188, + 0.66015625, + 0.5625, + -0.16503906, + -0.54296875, + -0.38085938, + 0.26171875, + 0.62109375, + 0.29101562, + -0.31054688, + 0.23730469, + -0.8515625, + 0.5234375, + 0.15332031, + 0.52734375, + -0.079589844, + -0.080566406, + -0.15527344, + -0.022827148, + 0.030517578, + -0.1640625, + -0.421875, + 0.09716797, + 0.03930664, + -0.055908203, + -0.546875, + -0.47851562, + 0.091796875, + 0.32226562, + -0.94140625, + -0.04638672, + -1.203125, + -0.39648438, + 0.45507812, + 0.296875, + -0.45703125, + 0.37890625, + -0.122558594, + 0.28320312, + -0.01965332, + -0.11669922, + -0.34570312, + -0.53515625, + -0.091308594, + -0.9375, + -0.32617188, + 0.095214844, + -0.4765625, + 0.37890625, + -0.859375, + 1.1015625, + -0.08935547, + 0.46484375, + -0.19238281, + 0.7109375, + 0.040039062, + -0.5390625, + 0.22363281, + -0.70703125, + 0.4921875, + -0.119140625, + -0.26757812, + -0.08496094, + 0.0859375, + -0.00390625, + -0.013366699, + -0.03955078, + 0.07421875, + -0.13085938, + 0.29101562, + -0.12109375, + 0.45703125, + 0.021728516, + 0.38671875, + -0.3671875, + -0.52734375, + -0.115722656, + 0.125, + 0.5703125, + -1.234375, + 0.06298828, + -0.55859375, + 0.60546875, + 0.8125, + -0.0032958984, + -0.068359375, + -0.21191406, + 0.56640625, + 0.17285156, + -0.3515625, + 0.36328125, + -0.99609375, + 0.43554688, + -0.1015625, + 0.07080078, + -0.66796875, + 1.359375, + 0.41601562, + 0.15917969, + 0.17773438, + -0.28710938, + 0.021850586, + -0.46289062, + 0.17578125, + -0.03955078, + -0.026855469, + 0.5078125, + -0.65625, + 0.0012512207, + 0.044433594, + -0.18652344, + 0.4921875, + -0.75390625, + 0.0072021484, + 0.4375, + -0.31445312, + 0.20214844, + 0.15039062, + -0.63671875, + -0.296875, + -0.375, + -0.027709961, + 0.013427734, + 0.17089844, + 0.89453125, + 0.11621094, + -0.43945312, + -0.30859375, + 0.02709961, + 0.23242188, + -0.64453125, + -0.859375, + 0.22167969, + -0.023071289, + -0.052734375, + 0.3671875, + -0.18359375, + 0.81640625, + -0.11816406, + 0.028320312, + 0.19042969, + 0.012817383, + -0.43164062, + 0.55859375, + -0.27929688, + 0.14257812, + -0.140625, + -0.048583984, + -0.014526367, + 0.35742188, + 0.22753906, + 0.13183594, + 0.04638672, + 0.03930664, + -0.29296875, + -0.2109375, + -0.16308594, + -0.48046875, + -0.13378906, + -0.39257812, + 0.29296875, + -0.047851562, + -0.5546875, + 0.08300781, + -0.14941406, + -0.07080078, + 0.12451172, + 0.1953125, + -0.51171875, + -0.048095703, + 0.1953125, + -0.37695312, + 0.46875, + -0.084472656, + 0.19042969, + -0.39453125, + 0.69921875, + -0.0065307617, + 0.25390625, + -0.16992188, + 
-0.5078125, + 0.016845703, + 0.27929688, + -0.22070312, + 0.671875, + 0.18652344, + 0.25, + -0.046875, + -0.012023926, + -0.36523438, + 0.36523438, + -0.11279297, + 0.421875, + 0.079589844, + -0.100097656, + 0.37304688, + 0.29882812, + -0.10546875, + -0.36523438, + 0.040039062, + 0.546875, + 0.12890625, + -0.06542969, + -0.38085938, + -0.35742188, + -0.6484375, + -0.28515625, + 0.0107421875, + -0.055664062, + 0.45703125, + 0.33984375, + 0.26367188, + -0.23144531, + 0.012878418, + -0.875, + 0.11035156, + 0.33984375, + 0.203125, + 0.38867188, + 0.24902344, + -0.37304688, + -0.98046875, + -0.122558594, + -0.17871094, + -0.09277344, + 0.1796875, + 0.4453125, + -0.66796875, + 0.78515625, + 0.12988281, + 0.35546875, + 0.44140625, + 0.58984375, + 0.29492188, + 0.7734375, + -0.21972656, + -0.40234375, + -0.22265625, + 0.18359375, + 0.54296875, + 0.17382812, + 0.59375, + -0.390625, + -0.92578125, + -0.017456055, + -0.25, + 0.73828125, + 0.7578125, + -0.3828125, + -0.25976562, + 0.049072266, + 0.046875, + -0.3515625, + 0.30078125, + -1.03125, + -0.48828125, + 0.0017929077, + -0.26171875, + 0.20214844, + 0.29882812, + 0.064941406, + 0.21484375, + -0.55078125, + -0.021362305, + 0.12988281, + 0.27148438, + 0.38867188, + -0.19726562, + -0.55078125, + 0.1640625, + 0.32226562, + -0.72265625, + 0.36132812, + 1.21875, + -0.22070312, + -0.32421875, + -0.29882812, + 0.0024414062, + 0.19921875, + 0.734375, + 0.16210938, + 0.17871094, + -0.19140625, + 0.38476562, + -0.06591797, + -0.47070312, + -0.040039062, + -0.33007812, + -0.07910156, + -0.2890625, + 0.00970459, + 0.12695312, + -0.12060547, + -0.18847656, + 1.015625, + -0.032958984, + 0.12451172, + -0.38476562, + 0.063964844, + 1.0859375, + 0.067871094, + -0.24511719, + 0.125, + 0.10546875, + -0.22460938, + -0.29101562, + 0.24414062, + -0.017944336, + -0.15625, + -0.60546875, + -0.25195312, + -0.46875, + 0.80859375, + -0.34960938, + 0.42382812, + 0.796875, + 0.296875, + -0.067871094, + 0.39453125, + 0.07470703, + 0.033935547, + 0.24414062, + 0.32617188, + 0.023925781, + 0.73046875, + 0.2109375, + -0.43164062, + 0.14453125, + 0.63671875, + 0.21972656, + -0.1875, + -0.18066406, + -0.22167969, + -1.3359375, + 0.52734375, + -0.40625, + -0.12988281, + 0.17480469, + -0.18066406, + 0.58984375, + -0.32421875, + -0.13476562, + 0.39257812, + -0.19238281, + 0.068359375, + 0.7265625, + -0.7109375, + -0.125, + 0.328125, + 0.34179688, + -0.48828125, + -0.10058594, + -0.83984375, + 0.30273438, + 0.008239746, + -1.390625, + 0.171875, + 0.34960938, + 0.44921875, + 0.22167969, + 0.60546875, + -0.36914062, + -0.028808594, + -0.19921875, + 0.6875, + 0.52734375, + -0.07421875, + 0.35546875, + 0.546875, + 0.08691406, + 0.23339844, + -0.984375, + -0.20507812, + 0.08544922, + 0.453125, + -0.07421875, + -0.953125, + 0.74609375, + -0.796875, + 0.47851562, + 0.81640625, + -0.44921875, + -0.33398438, + -0.54296875, + 0.46484375, + -0.390625, + -0.24121094, + -0.0115356445, + 1.1328125, + 1.0390625, + 0.6484375, + 0.35742188, + -0.29492188, + -0.0007095337, + -0.060302734, + 0.21777344, + 0.15136719, + -0.6171875, + 0.11328125, + -0.025878906, + 0.19238281, + 0.140625, + 0.171875, + 0.25195312, + 0.10546875, + 0.0008354187, + -0.13476562, + -0.26953125, + 0.025024414, + -0.28320312, + -0.107910156, + 1.015625, + 0.05493164, + -0.12988281, + 0.30859375, + 0.22558594, + -0.60546875, + 0.11328125, + -1.203125, + 0.6484375, + 0.087402344, + 0.32226562, + 0.63671875, + -0.07714844, + -1.390625, + -0.71875, + -0.34179688, + -0.10546875, + -0.37304688, + -0.09863281, + -0.41210938, + 
-0.14941406, + 0.41210938, + -0.20898438, + 0.18261719, + 0.67578125, + 0.41601562, + 0.32617188, + 0.2421875, + -0.14257812, + -0.6796875, + 0.01953125, + 0.34179688, + 0.20800781, + -0.123046875, + 0.087402344, + 0.85546875, + 0.33984375, + 0.33203125, + -0.68359375, + 0.44921875, + 0.50390625, + 0.083496094, + 0.10888672, + -0.09863281, + 0.55078125, + 0.09765625, + -0.50390625, + 0.13378906, + -0.29882812, + 0.030761719, + -0.64453125, + 0.22949219, + 0.43945312, + 0.16503906, + 0.10888672, + -0.12792969, + -0.039794922, + -0.111328125, + -0.35742188, + 0.053222656, + -0.78125, + -0.4375, + 0.359375, + -0.88671875, + -0.21972656, + -0.053710938, + 0.91796875, + -0.10644531, + 0.55859375, + -0.7734375, + 0.5078125, + 0.46484375, + 0.32226562, + 0.16796875, + -0.28515625, + 0.045410156, + -0.45117188, + 0.38867188, + -0.33398438, + -0.5234375, + 0.296875, + 0.6015625, + 0.3515625, + -0.734375, + 0.3984375, + -0.08251953, + 0.359375, + -0.28515625, + -0.88671875, + 0.0051879883, + 0.045166016, + -0.7421875, + -0.36523438, + 0.140625, + 0.18066406, + -0.171875, + -0.15625, + -0.53515625, + 0.2421875, + -0.19140625, + -0.18066406, + 0.25390625, + 0.6875, + -0.01965332, + -0.33203125, + 0.29492188, + 0.107421875, + -0.048339844, + -0.82421875, + 0.52734375, + 0.78125, + 0.8203125, + -0.90625, + 0.765625, + 0.0390625, + 0.045410156, + 0.26367188, + -0.14355469, + -0.26367188, + 0.390625, + -0.10888672, + 0.33007812, + -0.5625, + 0.08105469, + -0.13769531, + 0.8515625, + -0.14453125, + 0.77734375, + -0.48046875, + -0.3515625, + -0.25390625, + -0.09277344, + 0.23925781, + -0.022338867, + -0.45898438, + 0.36132812, + -0.23828125, + 0.265625, + -0.48632812, + -0.46875, + -0.75390625, + 1.3125, + 0.78125, + -0.63671875, + -1.21875, + 0.5078125, + -0.27734375, + -0.118652344, + 0.041992188, + -0.14648438, + -0.8046875, + 0.21679688, + -0.79296875, + 0.28320312, + -0.09667969, + 0.42773438, + 0.49414062, + 0.44726562, + 0.21972656, + -0.02746582, + -0.03540039, + -0.14941406, + -0.515625, + -0.27929688, + 0.9609375, + -0.007598877, + 0.34765625, + -0.060546875, + -0.44726562, + 0.7421875, + 0.15332031, + 0.45117188, + -0.4921875, + 0.07080078, + 0.5625, + 0.3984375, + -0.20019531, + 0.014892578, + 0.63671875, + -0.0071411133, + 0.016357422, + 1.0625, + 0.049316406, + 0.18066406, + 0.09814453, + -0.52734375, + -0.359375, + -0.072265625, + -0.41992188, + 0.39648438, + 0.38671875, + -0.30273438, + -0.056640625, + -0.640625, + -0.44921875, + 0.49414062, + 0.29101562, + 0.49609375, + 0.40429688, + -0.10205078, + 0.49414062, + -0.28125, + -0.12695312, + -0.0022735596, + -0.37304688, + 0.122558594, + 0.07519531, + -0.12597656, + -0.38085938, + -0.19824219, + -0.40039062, + 0.56640625, + -1.140625, + -0.515625, + -0.17578125, + -0.765625, + -0.43945312, + 0.3359375, + -0.24707031, + 0.32617188, + -0.45117188, + -0.37109375, + 0.45117188, + -0.27539062, + -0.38867188, + 0.09082031, + 0.17675781, + 0.49414062, + 0.19921875, + 0.17480469, + 0.8515625, + -0.23046875, + -0.234375, + -0.28515625, + 0.10253906, + 0.29101562, + -0.3359375, + -0.203125, + 0.6484375, + 0.11767578, + -0.20214844, + -0.42382812, + 0.26367188, + 0.6328125, + 0.0059509277, + 0.08691406, + -1.5625, + -0.43554688, + 0.17675781, + 0.091796875, + -0.5234375, + -0.09863281, + 0.20605469, + 0.16601562, + -0.578125, + 0.017700195, + 0.41015625, + 1.03125, + -0.55078125, + 0.21289062, + -0.35351562, + 0.24316406, + -0.123535156, + 0.11035156, + -0.48242188, + -0.34179688, + 0.45117188, + 0.3125, + -0.071777344, + 0.12792969, + 0.55859375, + 
0.063964844, + -0.21191406, + 0.01965332, + -1.359375, + -0.21582031, + -0.019042969, + 0.16308594, + -0.3671875, + -0.40625, + -1.0234375, + -0.21289062, + 0.24023438, + -0.28125, + 0.26953125, + -0.14550781, + -0.087890625, + 0.16113281, + -0.49804688, + -0.17675781, + -0.890625, + 0.27929688, + 0.484375, + 0.27148438, + 0.11816406, + 0.83984375, + 0.029052734, + -0.890625, + 0.66796875, + 0.78515625, + -0.953125, + 0.49414062, + -0.546875, + 0.106933594, + -0.08251953, + 0.2890625, + -0.1484375, + -0.85546875, + 0.32421875, + -0.0040893555, + -0.16601562, + -0.16699219, + 0.24414062, + -0.5078125, + 0.25390625, + -0.10253906, + 0.15625, + 0.140625, + -0.27539062, + -0.546875, + -0.5546875, + -0.71875, + 0.37304688, + 0.060058594, + -0.076171875, + 0.44921875, + 0.06933594, + -0.28710938, + -0.22949219, + 0.17578125, + 0.09814453, + 0.4765625, + -0.95703125, + -0.03540039, + 0.21289062, + -0.7578125, + -0.07373047, + 0.10546875, + 0.07128906, + 0.76171875, + 0.4296875, + -0.09375, + 0.27539062, + -0.55078125, + 0.29882812, + -0.42382812, + 0.32617188, + -0.39648438, + 0.12451172, + 0.16503906, + -0.22460938, + -0.65625, + -0.022094727, + 0.61328125, + -0.024780273, + 0.62109375, + -0.033447266, + 0.515625, + 0.12890625, + -0.21875, + -0.08642578, + 0.49804688, + -0.2265625, + -0.29296875, + 0.19238281, + 0.3515625, + -1.265625, + 0.57421875, + 0.20117188, + -0.28320312, + 0.1953125, + -0.30664062, + 0.2265625, + -0.11230469, + 0.83984375, + 0.111328125, + 0.265625, + 0.71484375, + -0.625, + 0.38867188, + 0.47070312, + -0.32617188, + -0.171875, + 1.0078125, + 0.19726562, + -0.118652344, + 0.63671875, + -0.068359375, + -0.25585938, + 0.4140625, + -0.29296875, + 0.21386719, + -0.064453125, + 0.15820312, + -0.89453125, + -0.16308594, + 0.48046875, + 0.14648438, + -0.5703125, + 0.84765625, + -0.19042969, + 0.03515625, + 0.42578125, + -0.27539062, + -0.5390625, + 0.95703125, + 0.2734375, + 0.16699219, + -0.328125, + 0.11279297, + 0.003250122, + 0.47265625, + -0.31640625, + 0.546875, + 0.55859375, + 0.06933594, + -0.61328125, + -0.16210938, + -0.375, + 0.100097656, + -0.088378906, + 0.12695312, + 0.079589844, + 0.123535156, + -1.0078125, + 0.6875, + 0.022949219, + -0.40039062, + -0.09863281, + 0.29101562, + -1.2890625, + -0.20996094, + 0.36328125, + -0.3515625, + 0.7890625, + 0.12207031, + 0.48046875, + -0.13671875, + -0.041015625, + 0.19824219, + 0.19921875, + 0.01171875, + -0.37695312, + -0.62890625, + 0.9375, + -0.671875, + 0.24609375, + 0.6484375, + -0.29101562, + 0.076171875, + 0.62109375, + -0.5546875, + 0.36523438, + 0.75390625, + -0.19140625, + -0.875, + -0.8203125, + -0.24414062, + -0.625, + 0.1796875, + -0.40039062, + 0.25390625, + -0.14550781, + -0.21679688, + -0.828125, + 0.3359375, + 0.43554688, + 0.55078125, + -0.44921875, + -0.28710938, + 0.24023438, + 0.18066406, + -0.6953125, + 0.020385742, + -0.11376953, + 0.13867188, + -0.92578125, + 0.33398438, + -0.328125, + 0.78125, + -0.45507812, + -0.07470703, + 0.34179688, + 0.07080078, + 0.76171875, + 0.37890625, + -0.10644531, + 0.90234375, + -0.21875, + -0.15917969, + -0.36132812, + 0.2109375, + -0.45703125, + -0.76953125, + 0.21289062, + 0.26367188, + 0.49804688, + 0.35742188, + -0.20019531, + 0.31054688, + 0.34179688, + 0.17089844, + -0.15429688, + 0.39648438, + -0.5859375, + 0.20996094, + -0.40039062, + 0.5703125, + -0.515625, + 0.5234375, + 0.049560547, + 0.328125, + 0.24804688, + 0.42578125, + 0.609375, + 0.19238281, + 0.27929688, + 0.19335938, + 0.78125, + -0.9921875, + 0.23925781, + -1.3828125, + -0.22949219, + -0.578125, + 
-0.13964844, + -0.17382812, + -0.011169434, + 0.26171875, + -0.73046875, + -1.4375, + 0.6953125, + -0.7421875, + 0.052246094, + 0.12207031, + 1.3046875, + 0.38867188, + 0.040283203, + -0.546875, + -0.0021514893, + 0.18457031, + -0.5546875, + -0.51171875, + -0.16308594, + -0.104003906, + -0.38867188, + -0.20996094, + -0.8984375, + 0.6015625, + -0.30078125, + -0.13769531, + 0.16113281, + 0.58203125, + -0.23730469, + -0.125, + -1.0234375, + 0.875, + -0.7109375, + 0.29101562, + 0.09667969, + -0.3203125, + -0.48046875, + 0.37890625, + 0.734375, + -0.28710938, + -0.29882812, + -0.05493164, + 0.34765625, + -0.84375, + 0.65625, + 0.578125, + -0.20019531, + 0.13769531, + 0.10058594, + -0.37109375, + 0.36523438, + -0.22167969, + 0.72265625, + ], + "inputTextTokenCount": 6, + }, + ], + "amazon.titan-embed-text-v1::This is an embedding test.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "11233989-07e8-4ecb-9ba6-79601ba6d8cc"}, + 200, + { + "embedding": [ + -0.14160156, + 0.034423828, + 0.54296875, + 0.10986328, + 0.053466797, + 0.3515625, + 0.12988281, + -0.0002708435, + -0.21484375, + 0.060302734, + 0.58984375, + -0.5859375, + 0.52734375, + 0.82421875, + -0.91015625, + -0.19628906, + 0.45703125, + 0.609375, + -0.67578125, + 0.39453125, + -0.46875, + -0.25390625, + -0.21191406, + 0.114746094, + 0.31640625, + -0.41015625, + -0.32617188, + -0.43554688, + 0.4765625, + -0.4921875, + 0.40429688, + 0.06542969, + 0.859375, + -0.008056641, + -0.19921875, + 0.072753906, + 0.33203125, + 0.69921875, + 0.39453125, + 0.15527344, + 0.08886719, + -0.25, + 0.859375, + 0.22949219, + -0.19042969, + 0.13769531, + -0.078125, + 0.41210938, + 0.875, + 0.5234375, + 0.59765625, + -0.22949219, + -0.22558594, + -0.47460938, + 0.37695312, + 0.51953125, + -0.5703125, + 0.46679688, + 0.43554688, + 0.17480469, + -0.080566406, + -0.16699219, + -0.734375, + -1.0625, + -0.33984375, + 0.390625, + -0.18847656, + -0.5234375, + -0.48828125, + 0.44921875, + -0.09814453, + -0.3359375, + 0.087402344, + 0.36914062, + 1.3203125, + 0.25585938, + 0.14746094, + -0.059570312, + -0.15820312, + -0.037353516, + -0.61328125, + -0.6484375, + -0.35351562, + 0.55078125, + -0.26953125, + 0.90234375, + 0.3671875, + 0.31054688, + 0.00014019012, + -0.171875, + 0.025512695, + 0.5078125, + 0.11621094, + 0.33203125, + 0.8125, + -0.3046875, + -1.078125, + -0.5703125, + 0.26171875, + -0.4609375, + 0.203125, + 0.44726562, + -0.5078125, + 0.41601562, + -0.1953125, + 0.028930664, + -0.57421875, + 0.2265625, + 0.13574219, + -0.040039062, + -0.22949219, + -0.515625, + -0.19042969, + -0.30078125, + 0.10058594, + -0.66796875, + 0.6015625, + 0.296875, + -0.765625, + -0.87109375, + 0.2265625, + 0.068847656, + -0.088378906, + -0.1328125, + -0.796875, + -0.37304688, + 0.47460938, + -0.3515625, + -0.8125, + -0.32226562, + 0.265625, + 0.3203125, + -0.4140625, + -0.49023438, + 0.859375, + -0.19140625, + -0.6328125, + 0.10546875, + -0.5625, + 0.66015625, + 0.26171875, + -0.2109375, + 0.421875, + -0.82421875, + 0.29296875, + 0.17773438, + 0.24023438, + 0.5078125, + -0.49804688, + -0.10205078, + 0.10498047, + -0.36132812, + -0.47460938, + -0.20996094, + 0.010070801, + -0.546875, + 0.66796875, + -0.123046875, + -0.75390625, + 0.19628906, + 0.17480469, + 0.18261719, + -0.96875, + -0.26171875, + 0.4921875, + -0.40039062, + 0.296875, + 0.1640625, + -0.20507812, + -0.36132812, + 0.76171875, + -1.234375, + -0.625, + 0.060058594, + -0.09375, + -0.14746094, + 1.09375, + 0.057861328, + 0.22460938, + -0.703125, + 0.07470703, + 0.23828125, + -0.083984375, + -0.54296875, + 
0.5546875, + -0.5, + -0.390625, + 0.106933594, + 0.6640625, + 0.27734375, + -0.953125, + 0.35351562, + -0.7734375, + -0.77734375, + 0.16503906, + -0.42382812, + 0.36914062, + 0.020141602, + -1.3515625, + 0.18847656, + 0.13476562, + -0.034179688, + -0.03930664, + -0.03857422, + -0.027954102, + 0.73828125, + -0.18945312, + -0.09814453, + -0.46289062, + 0.36914062, + 0.033203125, + 0.020874023, + -0.703125, + 0.91796875, + 0.38671875, + 0.625, + -0.19335938, + -0.16796875, + -0.58203125, + 0.21386719, + -0.032470703, + -0.296875, + -0.15625, + -0.1640625, + -0.74609375, + 0.328125, + 0.5546875, + -0.1953125, + 1.0546875, + 0.171875, + -0.099609375, + 0.5234375, + 0.05078125, + -0.35742188, + -0.2734375, + -1.3203125, + -0.8515625, + -0.16015625, + 0.01574707, + 0.29296875, + 0.18457031, + -0.265625, + 0.048339844, + 0.045654297, + -0.32226562, + 0.087890625, + -0.0047302246, + 0.38671875, + 0.10644531, + -0.06225586, + 1.03125, + 0.94140625, + -0.3203125, + 0.20800781, + -1.171875, + 0.48046875, + -0.091796875, + 0.20800781, + -0.1328125, + -0.20507812, + 0.28125, + -0.47070312, + -0.09033203, + 0.0013809204, + -0.08203125, + 0.43359375, + -0.03100586, + -0.060791016, + -0.53515625, + -1.46875, + 0.000101566315, + 0.515625, + 0.40625, + -0.10498047, + -0.15820312, + -0.009460449, + -0.77734375, + -0.5859375, + 0.9765625, + 0.099609375, + 0.51953125, + 0.38085938, + -0.09667969, + -0.100097656, + -0.5, + -1.3125, + -0.18066406, + -0.099121094, + 0.26171875, + -0.14453125, + -0.546875, + 0.17578125, + 0.484375, + 0.765625, + 0.45703125, + 0.2734375, + 0.0028076172, + 0.17089844, + -0.32421875, + -0.37695312, + 0.30664062, + -0.48046875, + 0.07128906, + 0.031982422, + -0.31054688, + -0.055419922, + -0.29296875, + 0.3359375, + -0.296875, + 0.47851562, + -0.05126953, + 0.18457031, + -0.01953125, + -0.35742188, + 0.017944336, + -0.25, + 0.10595703, + 0.17382812, + -0.73828125, + 0.36914062, + -0.15234375, + -0.8125, + 0.17382812, + 0.048095703, + 0.5625, + -0.33789062, + 0.023071289, + -0.21972656, + 0.16015625, + 0.032958984, + -1.1171875, + -0.984375, + 0.83984375, + 0.009033203, + -0.042236328, + -0.46484375, + -0.08203125, + 0.44726562, + -0.765625, + -0.3984375, + -0.40820312, + -0.234375, + 0.044189453, + 0.119628906, + -0.7578125, + -0.55078125, + -0.4453125, + 0.7578125, + 0.34960938, + 0.96484375, + 0.35742188, + 0.36914062, + -0.35351562, + -0.36132812, + 1.109375, + 0.5859375, + 0.85546875, + -0.10644531, + -0.6953125, + -0.0066833496, + 0.042236328, + -0.06689453, + 0.36914062, + 0.9765625, + -0.3046875, + 0.59765625, + -0.6640625, + 0.21484375, + -0.07128906, + 1.1328125, + -0.51953125, + 0.86328125, + -0.11328125, + 0.15722656, + -0.36328125, + -0.04638672, + 1.4375, + 0.18457031, + -0.18359375, + 0.10595703, + -0.49023438, + -0.07324219, + -0.73046875, + -0.119140625, + 0.021118164, + 0.4921875, + -0.46875, + 0.28710938, + 0.3359375, + 0.11767578, + -0.2109375, + -0.14550781, + 0.39648438, + -0.27734375, + 0.48046875, + 0.12988281, + 0.45507812, + -0.375, + -0.84765625, + 0.25585938, + -0.36523438, + 0.8046875, + 0.42382812, + -0.24511719, + 0.54296875, + 0.71875, + 0.010009766, + -0.04296875, + 0.083984375, + -0.52734375, + 0.13964844, + -0.27539062, + -0.30273438, + 1.1484375, + -0.515625, + -0.19335938, + 0.58984375, + 0.049072266, + 0.703125, + -0.04272461, + 0.5078125, + 0.34960938, + -0.3359375, + -0.47460938, + 0.049316406, + 0.36523438, + 0.7578125, + -0.022827148, + -0.71484375, + 0.21972656, + 0.09716797, + -0.203125, + -0.36914062, + 1.34375, + 0.34179688, + 0.46679688, + 
1.078125, + 0.26171875, + 0.41992188, + 0.22363281, + -0.515625, + -0.5703125, + 0.13378906, + 0.26757812, + -0.22558594, + -0.5234375, + 0.06689453, + 0.08251953, + -0.625, + 0.16796875, + 0.43164062, + -0.55859375, + 0.28125, + 0.078125, + 0.6328125, + 0.23242188, + -0.064941406, + -0.004486084, + -0.20703125, + 0.2734375, + 0.453125, + -0.734375, + 0.04272461, + 0.36132812, + -0.19628906, + -0.12402344, + 1.3515625, + 0.25585938, + 0.4921875, + -0.29296875, + -0.58984375, + 0.021240234, + -0.044677734, + 0.7578125, + -0.7890625, + 0.10253906, + -0.15820312, + -0.5078125, + -0.39453125, + -0.453125, + 0.35742188, + 0.921875, + 0.44335938, + -0.49804688, + 0.44335938, + 0.31445312, + 0.58984375, + -1.0078125, + -0.22460938, + 0.24121094, + 0.87890625, + 0.66015625, + -0.390625, + -0.05053711, + 0.059570312, + 0.36132812, + -0.00038719177, + -0.017089844, + 0.62890625, + 0.203125, + 0.17480469, + 0.025512695, + 0.47460938, + 0.3125, + 1.140625, + 0.32421875, + -0.057861328, + 0.36914062, + -0.7265625, + -0.51953125, + 0.26953125, + 0.42773438, + 0.064453125, + 0.6328125, + 0.27148438, + -0.11767578, + 0.66796875, + -0.38671875, + 0.5234375, + -0.59375, + 0.5078125, + 0.008239746, + -0.34179688, + -0.27539062, + 0.5234375, + 1.296875, + 0.29492188, + -0.010986328, + -0.41210938, + 0.59375, + 0.061767578, + -0.33398438, + -2.03125, + 0.87890625, + -0.010620117, + 0.53125, + 0.14257812, + -0.515625, + -1.03125, + 0.578125, + 0.1875, + 0.44335938, + -0.33203125, + -0.36328125, + -0.3203125, + 0.29296875, + -0.8203125, + 0.41015625, + -0.48242188, + 0.66015625, + 0.5625, + -0.16503906, + -0.54296875, + -0.38085938, + 0.26171875, + 0.62109375, + 0.29101562, + -0.31054688, + 0.23730469, + -0.8515625, + 0.5234375, + 0.15332031, + 0.52734375, + -0.079589844, + -0.080566406, + -0.15527344, + -0.022827148, + 0.030517578, + -0.1640625, + -0.421875, + 0.09716797, + 0.03930664, + -0.055908203, + -0.546875, + -0.47851562, + 0.091796875, + 0.32226562, + -0.94140625, + -0.04638672, + -1.203125, + -0.39648438, + 0.45507812, + 0.296875, + -0.45703125, + 0.37890625, + -0.122558594, + 0.28320312, + -0.01965332, + -0.11669922, + -0.34570312, + -0.53515625, + -0.091308594, + -0.9375, + -0.32617188, + 0.095214844, + -0.4765625, + 0.37890625, + -0.859375, + 1.1015625, + -0.08935547, + 0.46484375, + -0.19238281, + 0.7109375, + 0.040039062, + -0.5390625, + 0.22363281, + -0.70703125, + 0.4921875, + -0.119140625, + -0.26757812, + -0.08496094, + 0.0859375, + -0.00390625, + -0.013366699, + -0.03955078, + 0.07421875, + -0.13085938, + 0.29101562, + -0.12109375, + 0.45703125, + 0.021728516, + 0.38671875, + -0.3671875, + -0.52734375, + -0.115722656, + 0.125, + 0.5703125, + -1.234375, + 0.06298828, + -0.55859375, + 0.60546875, + 0.8125, + -0.0032958984, + -0.068359375, + -0.21191406, + 0.56640625, + 0.17285156, + -0.3515625, + 0.36328125, + -0.99609375, + 0.43554688, + -0.1015625, + 0.07080078, + -0.66796875, + 1.359375, + 0.41601562, + 0.15917969, + 0.17773438, + -0.28710938, + 0.021850586, + -0.46289062, + 0.17578125, + -0.03955078, + -0.026855469, + 0.5078125, + -0.65625, + 0.0012512207, + 0.044433594, + -0.18652344, + 0.4921875, + -0.75390625, + 0.0072021484, + 0.4375, + -0.31445312, + 0.20214844, + 0.15039062, + -0.63671875, + -0.296875, + -0.375, + -0.027709961, + 0.013427734, + 0.17089844, + 0.89453125, + 0.11621094, + -0.43945312, + -0.30859375, + 0.02709961, + 0.23242188, + -0.64453125, + -0.859375, + 0.22167969, + -0.023071289, + -0.052734375, + 0.3671875, + -0.18359375, + 0.81640625, + -0.11816406, + 0.028320312, 
+ 0.19042969, + 0.012817383, + -0.43164062, + 0.55859375, + -0.27929688, + 0.14257812, + -0.140625, + -0.048583984, + -0.014526367, + 0.35742188, + 0.22753906, + 0.13183594, + 0.04638672, + 0.03930664, + -0.29296875, + -0.2109375, + -0.16308594, + -0.48046875, + -0.13378906, + -0.39257812, + 0.29296875, + -0.047851562, + -0.5546875, + 0.08300781, + -0.14941406, + -0.07080078, + 0.12451172, + 0.1953125, + -0.51171875, + -0.048095703, + 0.1953125, + -0.37695312, + 0.46875, + -0.084472656, + 0.19042969, + -0.39453125, + 0.69921875, + -0.0065307617, + 0.25390625, + -0.16992188, + -0.5078125, + 0.016845703, + 0.27929688, + -0.22070312, + 0.671875, + 0.18652344, + 0.25, + -0.046875, + -0.012023926, + -0.36523438, + 0.36523438, + -0.11279297, + 0.421875, + 0.079589844, + -0.100097656, + 0.37304688, + 0.29882812, + -0.10546875, + -0.36523438, + 0.040039062, + 0.546875, + 0.12890625, + -0.06542969, + -0.38085938, + -0.35742188, + -0.6484375, + -0.28515625, + 0.0107421875, + -0.055664062, + 0.45703125, + 0.33984375, + 0.26367188, + -0.23144531, + 0.012878418, + -0.875, + 0.11035156, + 0.33984375, + 0.203125, + 0.38867188, + 0.24902344, + -0.37304688, + -0.98046875, + -0.122558594, + -0.17871094, + -0.09277344, + 0.1796875, + 0.4453125, + -0.66796875, + 0.78515625, + 0.12988281, + 0.35546875, + 0.44140625, + 0.58984375, + 0.29492188, + 0.7734375, + -0.21972656, + -0.40234375, + -0.22265625, + 0.18359375, + 0.54296875, + 0.17382812, + 0.59375, + -0.390625, + -0.92578125, + -0.017456055, + -0.25, + 0.73828125, + 0.7578125, + -0.3828125, + -0.25976562, + 0.049072266, + 0.046875, + -0.3515625, + 0.30078125, + -1.03125, + -0.48828125, + 0.0017929077, + -0.26171875, + 0.20214844, + 0.29882812, + 0.064941406, + 0.21484375, + -0.55078125, + -0.021362305, + 0.12988281, + 0.27148438, + 0.38867188, + -0.19726562, + -0.55078125, + 0.1640625, + 0.32226562, + -0.72265625, + 0.36132812, + 1.21875, + -0.22070312, + -0.32421875, + -0.29882812, + 0.0024414062, + 0.19921875, + 0.734375, + 0.16210938, + 0.17871094, + -0.19140625, + 0.38476562, + -0.06591797, + -0.47070312, + -0.040039062, + -0.33007812, + -0.07910156, + -0.2890625, + 0.00970459, + 0.12695312, + -0.12060547, + -0.18847656, + 1.015625, + -0.032958984, + 0.12451172, + -0.38476562, + 0.063964844, + 1.0859375, + 0.067871094, + -0.24511719, + 0.125, + 0.10546875, + -0.22460938, + -0.29101562, + 0.24414062, + -0.017944336, + -0.15625, + -0.60546875, + -0.25195312, + -0.46875, + 0.80859375, + -0.34960938, + 0.42382812, + 0.796875, + 0.296875, + -0.067871094, + 0.39453125, + 0.07470703, + 0.033935547, + 0.24414062, + 0.32617188, + 0.023925781, + 0.73046875, + 0.2109375, + -0.43164062, + 0.14453125, + 0.63671875, + 0.21972656, + -0.1875, + -0.18066406, + -0.22167969, + -1.3359375, + 0.52734375, + -0.40625, + -0.12988281, + 0.17480469, + -0.18066406, + 0.58984375, + -0.32421875, + -0.13476562, + 0.39257812, + -0.19238281, + 0.068359375, + 0.7265625, + -0.7109375, + -0.125, + 0.328125, + 0.34179688, + -0.48828125, + -0.10058594, + -0.83984375, + 0.30273438, + 0.008239746, + -1.390625, + 0.171875, + 0.34960938, + 0.44921875, + 0.22167969, + 0.60546875, + -0.36914062, + -0.028808594, + -0.19921875, + 0.6875, + 0.52734375, + -0.07421875, + 0.35546875, + 0.546875, + 0.08691406, + 0.23339844, + -0.984375, + -0.20507812, + 0.08544922, + 0.453125, + -0.07421875, + -0.953125, + 0.74609375, + -0.796875, + 0.47851562, + 0.81640625, + -0.44921875, + -0.33398438, + -0.54296875, + 0.46484375, + -0.390625, + -0.24121094, + -0.0115356445, + 1.1328125, + 1.0390625, + 0.6484375, + 
0.35742188, + -0.29492188, + -0.0007095337, + -0.060302734, + 0.21777344, + 0.15136719, + -0.6171875, + 0.11328125, + -0.025878906, + 0.19238281, + 0.140625, + 0.171875, + 0.25195312, + 0.10546875, + 0.0008354187, + -0.13476562, + -0.26953125, + 0.025024414, + -0.28320312, + -0.107910156, + 1.015625, + 0.05493164, + -0.12988281, + 0.30859375, + 0.22558594, + -0.60546875, + 0.11328125, + -1.203125, + 0.6484375, + 0.087402344, + 0.32226562, + 0.63671875, + -0.07714844, + -1.390625, + -0.71875, + -0.34179688, + -0.10546875, + -0.37304688, + -0.09863281, + -0.41210938, + -0.14941406, + 0.41210938, + -0.20898438, + 0.18261719, + 0.67578125, + 0.41601562, + 0.32617188, + 0.2421875, + -0.14257812, + -0.6796875, + 0.01953125, + 0.34179688, + 0.20800781, + -0.123046875, + 0.087402344, + 0.85546875, + 0.33984375, + 0.33203125, + -0.68359375, + 0.44921875, + 0.50390625, + 0.083496094, + 0.10888672, + -0.09863281, + 0.55078125, + 0.09765625, + -0.50390625, + 0.13378906, + -0.29882812, + 0.030761719, + -0.64453125, + 0.22949219, + 0.43945312, + 0.16503906, + 0.10888672, + -0.12792969, + -0.039794922, + -0.111328125, + -0.35742188, + 0.053222656, + -0.78125, + -0.4375, + 0.359375, + -0.88671875, + -0.21972656, + -0.053710938, + 0.91796875, + -0.10644531, + 0.55859375, + -0.7734375, + 0.5078125, + 0.46484375, + 0.32226562, + 0.16796875, + -0.28515625, + 0.045410156, + -0.45117188, + 0.38867188, + -0.33398438, + -0.5234375, + 0.296875, + 0.6015625, + 0.3515625, + -0.734375, + 0.3984375, + -0.08251953, + 0.359375, + -0.28515625, + -0.88671875, + 0.0051879883, + 0.045166016, + -0.7421875, + -0.36523438, + 0.140625, + 0.18066406, + -0.171875, + -0.15625, + -0.53515625, + 0.2421875, + -0.19140625, + -0.18066406, + 0.25390625, + 0.6875, + -0.01965332, + -0.33203125, + 0.29492188, + 0.107421875, + -0.048339844, + -0.82421875, + 0.52734375, + 0.78125, + 0.8203125, + -0.90625, + 0.765625, + 0.0390625, + 0.045410156, + 0.26367188, + -0.14355469, + -0.26367188, + 0.390625, + -0.10888672, + 0.33007812, + -0.5625, + 0.08105469, + -0.13769531, + 0.8515625, + -0.14453125, + 0.77734375, + -0.48046875, + -0.3515625, + -0.25390625, + -0.09277344, + 0.23925781, + -0.022338867, + -0.45898438, + 0.36132812, + -0.23828125, + 0.265625, + -0.48632812, + -0.46875, + -0.75390625, + 1.3125, + 0.78125, + -0.63671875, + -1.21875, + 0.5078125, + -0.27734375, + -0.118652344, + 0.041992188, + -0.14648438, + -0.8046875, + 0.21679688, + -0.79296875, + 0.28320312, + -0.09667969, + 0.42773438, + 0.49414062, + 0.44726562, + 0.21972656, + -0.02746582, + -0.03540039, + -0.14941406, + -0.515625, + -0.27929688, + 0.9609375, + -0.007598877, + 0.34765625, + -0.060546875, + -0.44726562, + 0.7421875, + 0.15332031, + 0.45117188, + -0.4921875, + 0.07080078, + 0.5625, + 0.3984375, + -0.20019531, + 0.014892578, + 0.63671875, + -0.0071411133, + 0.016357422, + 1.0625, + 0.049316406, + 0.18066406, + 0.09814453, + -0.52734375, + -0.359375, + -0.072265625, + -0.41992188, + 0.39648438, + 0.38671875, + -0.30273438, + -0.056640625, + -0.640625, + -0.44921875, + 0.49414062, + 0.29101562, + 0.49609375, + 0.40429688, + -0.10205078, + 0.49414062, + -0.28125, + -0.12695312, + -0.0022735596, + -0.37304688, + 0.122558594, + 0.07519531, + -0.12597656, + -0.38085938, + -0.19824219, + -0.40039062, + 0.56640625, + -1.140625, + -0.515625, + -0.17578125, + -0.765625, + -0.43945312, + 0.3359375, + -0.24707031, + 0.32617188, + -0.45117188, + -0.37109375, + 0.45117188, + -0.27539062, + -0.38867188, + 0.09082031, + 0.17675781, + 0.49414062, + 0.19921875, + 0.17480469, + 
0.8515625, + -0.23046875, + -0.234375, + -0.28515625, + 0.10253906, + 0.29101562, + -0.3359375, + -0.203125, + 0.6484375, + 0.11767578, + -0.20214844, + -0.42382812, + 0.26367188, + 0.6328125, + 0.0059509277, + 0.08691406, + -1.5625, + -0.43554688, + 0.17675781, + 0.091796875, + -0.5234375, + -0.09863281, + 0.20605469, + 0.16601562, + -0.578125, + 0.017700195, + 0.41015625, + 1.03125, + -0.55078125, + 0.21289062, + -0.35351562, + 0.24316406, + -0.123535156, + 0.11035156, + -0.48242188, + -0.34179688, + 0.45117188, + 0.3125, + -0.071777344, + 0.12792969, + 0.55859375, + 0.063964844, + -0.21191406, + 0.01965332, + -1.359375, + -0.21582031, + -0.019042969, + 0.16308594, + -0.3671875, + -0.40625, + -1.0234375, + -0.21289062, + 0.24023438, + -0.28125, + 0.26953125, + -0.14550781, + -0.087890625, + 0.16113281, + -0.49804688, + -0.17675781, + -0.890625, + 0.27929688, + 0.484375, + 0.27148438, + 0.11816406, + 0.83984375, + 0.029052734, + -0.890625, + 0.66796875, + 0.78515625, + -0.953125, + 0.49414062, + -0.546875, + 0.106933594, + -0.08251953, + 0.2890625, + -0.1484375, + -0.85546875, + 0.32421875, + -0.0040893555, + -0.16601562, + -0.16699219, + 0.24414062, + -0.5078125, + 0.25390625, + -0.10253906, + 0.15625, + 0.140625, + -0.27539062, + -0.546875, + -0.5546875, + -0.71875, + 0.37304688, + 0.060058594, + -0.076171875, + 0.44921875, + 0.06933594, + -0.28710938, + -0.22949219, + 0.17578125, + 0.09814453, + 0.4765625, + -0.95703125, + -0.03540039, + 0.21289062, + -0.7578125, + -0.07373047, + 0.10546875, + 0.07128906, + 0.76171875, + 0.4296875, + -0.09375, + 0.27539062, + -0.55078125, + 0.29882812, + -0.42382812, + 0.32617188, + -0.39648438, + 0.12451172, + 0.16503906, + -0.22460938, + -0.65625, + -0.022094727, + 0.61328125, + -0.024780273, + 0.62109375, + -0.033447266, + 0.515625, + 0.12890625, + -0.21875, + -0.08642578, + 0.49804688, + -0.2265625, + -0.29296875, + 0.19238281, + 0.3515625, + -1.265625, + 0.57421875, + 0.20117188, + -0.28320312, + 0.1953125, + -0.30664062, + 0.2265625, + -0.11230469, + 0.83984375, + 0.111328125, + 0.265625, + 0.71484375, + -0.625, + 0.38867188, + 0.47070312, + -0.32617188, + -0.171875, + 1.0078125, + 0.19726562, + -0.118652344, + 0.63671875, + -0.068359375, + -0.25585938, + 0.4140625, + -0.29296875, + 0.21386719, + -0.064453125, + 0.15820312, + -0.89453125, + -0.16308594, + 0.48046875, + 0.14648438, + -0.5703125, + 0.84765625, + -0.19042969, + 0.03515625, + 0.42578125, + -0.27539062, + -0.5390625, + 0.95703125, + 0.2734375, + 0.16699219, + -0.328125, + 0.11279297, + 0.003250122, + 0.47265625, + -0.31640625, + 0.546875, + 0.55859375, + 0.06933594, + -0.61328125, + -0.16210938, + -0.375, + 0.100097656, + -0.088378906, + 0.12695312, + 0.079589844, + 0.123535156, + -1.0078125, + 0.6875, + 0.022949219, + -0.40039062, + -0.09863281, + 0.29101562, + -1.2890625, + -0.20996094, + 0.36328125, + -0.3515625, + 0.7890625, + 0.12207031, + 0.48046875, + -0.13671875, + -0.041015625, + 0.19824219, + 0.19921875, + 0.01171875, + -0.37695312, + -0.62890625, + 0.9375, + -0.671875, + 0.24609375, + 0.6484375, + -0.29101562, + 0.076171875, + 0.62109375, + -0.5546875, + 0.36523438, + 0.75390625, + -0.19140625, + -0.875, + -0.8203125, + -0.24414062, + -0.625, + 0.1796875, + -0.40039062, + 0.25390625, + -0.14550781, + -0.21679688, + -0.828125, + 0.3359375, + 0.43554688, + 0.55078125, + -0.44921875, + -0.28710938, + 0.24023438, + 0.18066406, + -0.6953125, + 0.020385742, + -0.11376953, + 0.13867188, + -0.92578125, + 0.33398438, + -0.328125, + 0.78125, + -0.45507812, + -0.07470703, + 
0.34179688, + 0.07080078, + 0.76171875, + 0.37890625, + -0.10644531, + 0.90234375, + -0.21875, + -0.15917969, + -0.36132812, + 0.2109375, + -0.45703125, + -0.76953125, + 0.21289062, + 0.26367188, + 0.49804688, + 0.35742188, + -0.20019531, + 0.31054688, + 0.34179688, + 0.17089844, + -0.15429688, + 0.39648438, + -0.5859375, + 0.20996094, + -0.40039062, + 0.5703125, + -0.515625, + 0.5234375, + 0.049560547, + 0.328125, + 0.24804688, + 0.42578125, + 0.609375, + 0.19238281, + 0.27929688, + 0.19335938, + 0.78125, + -0.9921875, + 0.23925781, + -1.3828125, + -0.22949219, + -0.578125, + -0.13964844, + -0.17382812, + -0.011169434, + 0.26171875, + -0.73046875, + -1.4375, + 0.6953125, + -0.7421875, + 0.052246094, + 0.12207031, + 1.3046875, + 0.38867188, + 0.040283203, + -0.546875, + -0.0021514893, + 0.18457031, + -0.5546875, + -0.51171875, + -0.16308594, + -0.104003906, + -0.38867188, + -0.20996094, + -0.8984375, + 0.6015625, + -0.30078125, + -0.13769531, + 0.16113281, + 0.58203125, + -0.23730469, + -0.125, + -1.0234375, + 0.875, + -0.7109375, + 0.29101562, + 0.09667969, + -0.3203125, + -0.48046875, + 0.37890625, + 0.734375, + -0.28710938, + -0.29882812, + -0.05493164, + 0.34765625, + -0.84375, + 0.65625, + 0.578125, + -0.20019531, + 0.13769531, + 0.10058594, + -0.37109375, + 0.36523438, + -0.22167969, + 0.72265625, + ], + "inputTextTokenCount": 6, + }, + ], + "cohere.embed-english-v3::This is an embedding test.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "11233989-07e8-4ecb-9ba6-79601ba6d8cc"}, + 200, + { + "embeddings": [ + [ + 0.03390503, + 0.010032654, + 0.020904541, + -0.017105103, + -0.020050049, + -0.015411377, + -0.012001038, + -0.019744873, + -0.0107803345, + 0.012702942, + -0.043273926, + 0.003583908, + -0.023422241, + -0.017440796, + 0.03277588, + -0.04321289, + 0.018661499, + 0.05697632, + 0.041107178, + -0.007549286, + -0.026916504, + 0.012214661, + 0.00012886524, + -0.03010559, + -0.011680603, + -0.008392334, + 0.06222534, + -0.026260376, + 0.026947021, + -0.018692017, + 0.0016307831, + -0.018798828, + -0.00025224686, + 0.03982544, + -0.017501831, + 0.03665161, + -0.040985107, + 0.029296875, + 0.025283813, + -0.013618469, + 0.0038471222, + -0.007751465, + 0.017501831, + -0.03756714, + -0.012863159, + -0.03781128, + 0.043945312, + -0.018112183, + 0.01713562, + -0.04736328, + 0.005264282, + 0.0018386841, + -0.0018186569, + 0.016540527, + -0.012268066, + 0.061462402, + 0.006629944, + 0.02671814, + 0.039733887, + -0.0026226044, + -0.037628174, + 0.007675171, + 0.02418518, + -0.039855957, + 0.016586304, + -0.034820557, + -0.016113281, + 0.03262329, + 0.07537842, + -0.00554657, + -0.014450073, + 0.013061523, + 0.0056991577, + 0.007396698, + -0.055114746, + 0.032684326, + 0.0051460266, + 0.046173096, + 0.009277344, + -0.052337646, + -0.022872925, + 0.013946533, + -0.03643799, + 0.004562378, + -0.018218994, + 0.019851685, + 0.0038719177, + -0.032958984, + 0.04147339, + -0.016616821, + -0.010231018, + 0.099121094, + 0.0015497208, + 0.06021118, + -0.045562744, + 0.010559082, + 0.021408081, + -0.009635925, + -0.04067993, + -0.03060913, + -0.020248413, + -0.049346924, + 0.017562866, + -0.019943237, + 0.008331299, + -0.027755737, + -0.016693115, + -0.052368164, + 0.044647217, + 0.027954102, + -0.034332275, + 0.09460449, + -0.013832092, + -0.022888184, + 0.0033721924, + 0.015457153, + -0.03062439, + 0.026794434, + 0.0104904175, + -0.018737793, + 0.0060920715, + 0.027297974, + 0.027786255, + -0.016799927, + 0.022872925, + 0.043640137, + 0.0036582947, + -0.05267334, + 0.010169983, + 
-0.030258179, + -0.021530151, + 0.0496521, + -0.05883789, + -0.03439331, + -0.017944336, + -0.006149292, + -0.066223145, + -0.017593384, + 0.0016317368, + -0.0027751923, + -0.0028533936, + -0.057434082, + 0.04800415, + 0.01374054, + -0.06756592, + 0.087768555, + 0.04269409, + -0.032226562, + -0.04321289, + -0.08850098, + -0.022201538, + 0.0009198189, + -0.0046043396, + 0.029724121, + 0.01979065, + 0.03753662, + -0.05343628, + 0.033111572, + 0.034332275, + 0.071777344, + 0.0063934326, + -0.034606934, + -0.003545761, + 0.008972168, + 0.008232117, + 0.033447266, + 0.015823364, + 0.027297974, + 0.018981934, + 0.120910645, + -0.037872314, + 0.0038814545, + -0.0031642914, + 0.0071029663, + -0.022644043, + -0.008758545, + 0.0023460388, + -0.025802612, + 0.034332275, + -0.0021533966, + 0.02268982, + -0.008506775, + 0.010147095, + 0.022827148, + 0.0007414818, + -0.055999756, + 0.03237915, + -0.01083374, + -0.014343262, + 0.028793335, + 0.0068511963, + 0.031402588, + 0.023269653, + -0.013748169, + 0.00014042854, + 0.0007624626, + -0.03111267, + 0.007549286, + -0.0236969, + -0.00043439865, + -0.0058670044, + 0.013587952, + -0.0029067993, + -0.0052948, + 0.015701294, + -0.005924225, + 0.032104492, + -0.0017576218, + 0.052947998, + 0.011299133, + -0.03152466, + -0.027526855, + 0.031051636, + -0.04232788, + -0.048217773, + 0.04055786, + -0.038482666, + -0.06088257, + -0.016540527, + -0.027114868, + 0.008636475, + 0.06008911, + -0.038513184, + 0.023330688, + 0.0054473877, + 0.018325806, + -0.017288208, + -7.2062016e-05, + 0.0064430237, + 0.02357483, + 0.02166748, + -0.043060303, + -0.009613037, + 0.013504028, + -0.010856628, + -0.018585205, + -0.00041294098, + -0.012687683, + 0.019302368, + 0.03250122, + 0.03503418, + 0.037353516, + -0.01272583, + -0.039215088, + 0.05230713, + 0.008918762, + 0.020614624, + -0.012039185, + -0.041534424, + 0.0317688, + 0.012168884, + -0.0027694702, + -0.023773193, + 0.0068855286, + -0.04309082, + 0.034820557, + 0.018463135, + 0.048736572, + -0.0016841888, + 0.032836914, + -0.070617676, + 0.04473877, + 0.052581787, + -0.042114258, + -0.017456055, + -0.03945923, + -0.0040626526, + 0.016433716, + 0.02368164, + -0.04034424, + 0.006038666, + 0.005634308, + -5.722046e-06, + -0.01864624, + 0.0635376, + -0.041229248, + -0.026809692, + -0.009262085, + 0.0011701584, + -0.0053367615, + 0.020935059, + 0.04473877, + 0.03665161, + 0.01121521, + 0.017486572, + 0.061920166, + 0.020812988, + 0.013786316, + 0.0006785393, + 0.0027122498, + 0.012237549, + 0.07446289, + -0.021011353, + 0.06921387, + 0.046966553, + 0.028945923, + 0.00044202805, + 0.03488159, + 0.0034942627, + -0.0038585663, + -0.023269653, + 0.04852295, + -0.01525116, + 0.032836914, + 0.013153076, + 0.0014123917, + -0.005718231, + 0.038024902, + 0.015182495, + -0.0143585205, + -0.008659363, + 0.024093628, + 0.008972168, + -0.011962891, + 0.005367279, + -0.027297974, + 0.02696228, + -0.0063972473, + -0.008087158, + -0.015899658, + 0.07122803, + 0.0463562, + -0.06713867, + -0.02230835, + 0.011940002, + -0.0015964508, + -0.049438477, + -0.04864502, + -0.06262207, + -0.015029907, + -0.0049057007, + -0.084472656, + -0.011177063, + -0.031555176, + 0.0035552979, + -0.028427124, + 0.021759033, + 0.016174316, + 0.0390625, + 0.04168701, + 0.07714844, + -0.0064086914, + 0.013000488, + -0.011512756, + -0.0021686554, + -0.032196045, + -0.057678223, + 0.010017395, + -0.06793213, + -0.04220581, + 0.06793213, + -0.029144287, + -0.02229309, + 0.03074646, + 0.03265381, + -0.020050049, + 0.021911621, + 0.055236816, + 0.05480957, + -0.015823364, + 
0.04815674, + 0.009384155, + 0.024383545, + -0.034484863, + -0.042114258, + -0.06744385, + -0.011207581, + 0.010749817, + 0.005012512, + 0.029510498, + 0.04977417, + 0.0070648193, + 0.00050497055, + -0.005710602, + -0.063964844, + -0.030807495, + -0.0013856888, + -0.026794434, + -0.024383545, + -0.025817871, + 0.00945282, + -0.008171082, + 0.071777344, + -0.018493652, + 0.041778564, + 0.0012550354, + 0.024902344, + 0.07366943, + 0.02381897, + -0.0016851425, + -0.015945435, + 0.035461426, + -0.038391113, + -0.02961731, + 0.020401001, + 0.0063171387, + 0.035308838, + 0.016586304, + -0.036590576, + -0.04522705, + 0.046722412, + -0.04901123, + -0.028076172, + -0.025787354, + -0.022567749, + -0.00843811, + 0.03778076, + 0.00020611286, + -0.006668091, + 0.027648926, + 0.027008057, + -0.011711121, + -0.0019445419, + 0.030456543, + 0.0038223267, + -0.037872314, + -0.019805908, + 0.017333984, + -0.023986816, + 0.0012874603, + -0.0053596497, + 0.02305603, + -0.03012085, + -0.013389587, + 0.016159058, + 0.020629883, + 0.04159546, + -0.008338928, + 0.029571533, + -0.0005707741, + 0.0231781, + -0.040863037, + -0.012886047, + -0.011627197, + -0.0574646, + 0.0011692047, + -0.0060691833, + -0.010749817, + 0.03567505, + -0.051757812, + 0.009735107, + 0.016159058, + 0.037139893, + -0.013214111, + 0.013938904, + -0.025482178, + 0.04647827, + 0.016418457, + -0.018936157, + 0.040008545, + -0.054595947, + 0.007865906, + -0.022872925, + 0.02508545, + -0.033935547, + 0.004310608, + 0.027008057, + 0.03010559, + 0.020736694, + 0.020111084, + 0.037719727, + -0.015487671, + -0.04598999, + 0.016189575, + -0.009643555, + 0.022399902, + 0.027786255, + 0.013580322, + -0.013595581, + -0.004825592, + 0.039855957, + -0.05834961, + -0.016906738, + -0.016235352, + -0.01826477, + -0.0053520203, + 0.031402588, + 0.023986816, + -0.012367249, + -0.02835083, + -0.004310608, + -0.025115967, + -0.05899048, + -0.036987305, + 0.01574707, + -0.007926941, + -0.030853271, + 0.04458618, + 0.00818634, + 0.017059326, + 4.7802925e-05, + 0.0062294006, + 0.028930664, + -0.027618408, + -0.013557434, + -0.0093307495, + -0.012741089, + -0.009307861, + 0.0032444, + -0.09460449, + -0.0552063, + 0.034576416, + 0.02178955, + 0.024612427, + 0.013587952, + -0.041656494, + -0.029647827, + 0.010848999, + 0.045959473, + -0.001698494, + 0.031341553, + 0.016693115, + 0.027145386, + -0.029541016, + -0.011222839, + 0.08703613, + -0.017303467, + -0.009376526, + 0.025436401, + -0.020217896, + 0.06939697, + 0.023651123, + 0.05065918, + -0.010749817, + -8.738041e-05, + 0.019195557, + 0.024917603, + -0.009590149, + -0.033172607, + -0.025314331, + 0.0049819946, + -0.0070266724, + 0.019622803, + -0.023605347, + 0.030258179, + 0.03869629, + -0.036834717, + -0.0025596619, + 0.007320404, + -0.021438599, + -0.0044021606, + -0.0052604675, + -0.050109863, + 0.0051498413, + -0.011734009, + -0.027770996, + -0.0043258667, + 0.07495117, + 0.007820129, + 0.03930664, + 0.058563232, + -0.006385803, + -0.04055786, + -0.02609253, + -0.03265381, + -0.02670288, + -0.013587952, + 0.015548706, + -0.047790527, + -0.010292053, + -0.02508545, + -0.005592346, + -0.025299072, + 0.023254395, + 0.0043945312, + 0.0062408447, + 0.006996155, + -0.015060425, + -0.0059814453, + -0.033325195, + -0.024520874, + -0.015472412, + 0.01676941, + -0.011817932, + 0.03173828, + 0.018981934, + -0.03488159, + -0.005340576, + -0.003358841, + 0.045715332, + 0.03314209, + 0.050964355, + -0.018859863, + 0.0541687, + 0.025115967, + 0.025894165, + -0.028366089, + -0.0070533752, + -0.022506714, + 0.018463135, + 
0.0068588257, + -0.023742676, + -0.011627197, + -0.05935669, + 0.026000977, + -0.013893127, + 0.06555176, + -0.010292053, + -0.020202637, + 0.018432617, + -0.0043754578, + 0.030548096, + 0.0262146, + 0.027801514, + -0.039001465, + 0.026412964, + 0.028793335, + 0.0063476562, + -0.0027694702, + -0.014305115, + 0.022003174, + 0.0017242432, + -0.02116394, + 0.028152466, + -0.027023315, + -0.008705139, + -0.0037574768, + 0.048034668, + 0.010238647, + -0.020324707, + 0.03086853, + -0.031066895, + 0.0146102905, + 0.014930725, + -0.014785767, + -0.010292053, + 0.017929077, + -0.010429382, + -0.00019311905, + -0.0012149811, + -0.026733398, + -0.026031494, + -0.050048828, + -0.07861328, + -0.017684937, + 0.061706543, + -0.011001587, + -0.041168213, + 0.003314972, + 0.029876709, + -0.009559631, + 0.032348633, + 0.0635376, + -0.040252686, + 0.056365967, + -0.0446167, + 0.026031494, + 0.017089844, + -0.04397583, + 0.044311523, + 0.0068740845, + 0.034454346, + 0.025848389, + 0.02027893, + 0.005153656, + 0.04159546, + -0.008239746, + 0.0056381226, + -0.0033721924, + -0.0692749, + -0.0038280487, + 0.022140503, + -0.008087158, + 0.0051727295, + -0.0102005005, + 0.0009098053, + 0.04067993, + -0.0065193176, + 0.026031494, + -0.08728027, + -0.027648926, + 0.04373169, + -0.048187256, + -0.033233643, + -0.014953613, + 0.022720337, + -0.004333496, + 0.02609253, + 0.0017251968, + -0.017868042, + -0.036956787, + -0.01838684, + 0.06665039, + 0.0259552, + -0.053497314, + -0.03111267, + 0.050872803, + 0.036895752, + -0.030944824, + -0.031921387, + -0.0569458, + 0.020248413, + -0.02229309, + 0.002916336, + 0.0076942444, + -0.0060691833, + -0.0317688, + -0.013793945, + 0.015068054, + -0.004508972, + -0.0047798157, + -0.0021457672, + 0.0003311634, + -0.036346436, + 0.0023174286, + 0.018096924, + 0.0063323975, + -0.014152527, + 0.0023460388, + 0.019836426, + -0.00233078, + 0.009048462, + -0.04812622, + -0.028442383, + 0.04925537, + 0.0043754578, + 0.04650879, + 0.055358887, + 0.036499023, + -0.044677734, + -0.012786865, + -0.013916016, + 0.025985718, + -0.033691406, + 0.010375977, + 0.036590576, + -0.036376953, + 0.009384155, + 0.0012626648, + -0.017623901, + -0.00032114983, + -0.026428223, + 0.018112183, + -0.016098022, + 0.0066375732, + -0.08355713, + 0.024291992, + 0.043670654, + -0.0067367554, + 0.01763916, + -0.0057640076, + -0.0154953, + 0.04196167, + 0.005542755, + 0.026901245, + 0.06427002, + 0.010612488, + 0.040222168, + 0.033966064, + 0.017028809, + 0.02748108, + 0.007980347, + -0.045013428, + -0.001121521, + 0.001408577, + 0.037750244, + 0.013549805, + -0.016967773, + -0.047729492, + 0.027496338, + -0.064331055, + 0.010917664, + -0.013870239, + 0.03668213, + 0.0055236816, + 0.0087509155, + -0.0847168, + -0.009521484, + 0.0703125, + -0.03338623, + -0.011062622, + 0.06555176, + -0.011268616, + -0.08477783, + 0.014633179, + 0.0045928955, + -0.0029029846, + -0.0050849915, + -0.016082764, + 0.037017822, + 0.023406982, + 0.03765869, + -0.032714844, + -0.03692627, + -0.0057411194, + -0.026748657, + 0.0107040405, + 0.033050537, + 0.018829346, + 0.058685303, + 0.0005726814, + 0.026947021, + 0.004272461, + -0.006614685, + -0.0018100739, + -0.024353027, + -0.007835388, + 0.0016746521, + 0.00806427, + -0.008636475, + 0.031188965, + -0.08416748, + 0.05014038, + 0.0073242188, + 0.017822266, + -0.08282471, + 0.010810852, + 0.07312012, + 0.014053345, + 0.00025081635, + 0.0015468597, + 0.00020134449, + -0.0043296814, + -0.050750732, + -0.05758667, + 0.002746582, + 0.030395508, + 0.014060974, + -0.047302246, + -0.045776367, + 
0.0045928955, + 0.01739502, + 0.010063171, + 0.0031433105, + -0.005428314, + 0.0031604767, + 0.018371582, + -0.025680542, + 0.0076446533, + 0.0026683807, + 0.025604248, + -0.025741577, + 0.05001831, + 0.06768799, + 0.049713135, + 0.016220093, + -0.06008911, + -0.034942627, + 0.024490356, + -0.01651001, + 0.026443481, + -0.06097412, + 0.04675293, + 0.034240723, + -0.06555176, + 0.02267456, + 0.012382507, + -0.023132324, + 0.015914917, + 0.027236938, + 0.033081055, + 0.025436401, + -0.018951416, + 0.015510559, + 0.0289917, + 0.06317139, + 0.02935791, + -0.03189087, + -0.015930176, + 0.0011873245, + -0.028625488, + 0.013977051, + -0.0012779236, + 0.04220581, + 0.025772095, + 0.009117126, + -0.052642822, + -0.009880066, + -0.032836914, + -0.028945923, + 0.027267456, + 0.07165527, + -0.005748749, + 0.01701355, + 0.0049972534, + -0.005130768, + 0.049835205, + -0.02015686, + 0.03857422, + 0.014305115, + 0.022415161, + 0.025924683, + 0.04031372, + -0.00015962124, + -0.02267456, + 0.003648758, + -0.008026123, + 0.042755127, + -0.004512787, + -0.022079468, + 0.010383606, + -0.014602661, + 0.026138306, + 0.020751953, + -0.025787354, + -0.000538826, + -0.013442993, + -0.00869751, + -0.017547607, + -0.03704834, + -0.010871887, + -0.0012283325, + 0.008880615, + -0.047088623, + -0.008216858, + 0.014083862, + -0.0015964508, + -0.028839111, + -0.00017225742, + -0.038604736, + 0.00187397, + 0.00504303, + 0.017990112, + 0.036224365, + -0.011581421, + -0.01436615, + 0.01626587, + 0.0026187897, + 0.064086914, + 0.016433716, + -0.010345459, + -0.036102295, + 0.025878906, + -0.04260254, + -0.0109939575, + 0.010246277, + 0.006877899, + -0.04071045, + -0.021224976, + -0.003982544, + 0.010421753, + -0.0345459, + -0.073791504, + -0.008987427, + 0.01260376, + -0.043762207, + 0.01210022, + -0.011390686, + -0.0007429123, + -0.027786255, + -0.023620605, + 0.019165039, + -0.010894775, + 0.004272461, + -0.0597229, + 0.036499023, + -0.049224854, + -0.04663086, + -0.02243042, + -0.0018253326, + 0.027572632, + -0.015159607, + -0.014411926, + -0.0033721924, + 0.032470703, + 0.041168213, + -0.021713257, + -0.027160645, + 0.025726318, + 0.048431396, + -0.031829834, + 0.037841797, + 0.04638672, + 0.014976501, + -0.024612427, + 0.0014600754, + -0.04031372, + -0.0011501312, + 0.004142761, + 0.012207031, + -0.00806427, + -0.009025574, + -0.051513672, + 0.030807495, + 0.016998291, + -0.049194336, + 0.0038776398, + -0.0042533875, + -0.04260254, + -0.008239746, + -0.0060195923, + 0.01473999, + 0.0034885406, + -0.0063171387, + -0.048614502, + 0.037628174, + -0.022247314, + -0.018951416, + 0.02192688, + -0.0065994263, + -0.02519226, + -0.0004734993, + -0.036102295, + 0.009109497, + -0.0029640198, + -0.012290955, + 0.011711121, + -0.034942627, + 0.043273926, + 0.022644043, + -0.026351929, + -0.014381409, + 0.044433594, + -0.04949951, + -0.025878906, + -0.01890564, + 0.010566711, + -0.017684937, + -0.06555176, + 0.047912598, + -0.031921387, + 0.047943115, + -0.061584473, + 0.051605225, + 0.009773254, + 0.016525269, + 0.0025367737, + -0.064086914, + 0.031311035, + -0.041778564, + -0.03250122, + -0.044158936, + -0.0135650635, + 0.008224487, + ] + ], + "id": "d26b1832-cd83-40cf-91e9-d96505b89ae8", + "response_type": "embeddings_floats", + "texts": ["This is an embedding test."], + }, + ], + "does-not-exist::": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "f4908827-3db9-4742-9103-2bbc34578b03", + "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", + }, + 400, + {"message": "The 
provided model identifier is invalid."},
+    ],
+    "ai21.j2-mid-v1::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "9021791d-3797-493d-9277-e33aa6f6d544",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "amazon.titan-embed-g1-text-02::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "73328313-506e-4da8-af0f-51017fa6ca3f",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "amazon.titan-embed-text-v1::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "aece6ad7-e2ff-443b-a953-ba7d385fd0cc",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "cohere.embed-english-v3::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "73328313-506e-4da8-af0f-51017fa6ca3f",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "amazon.titan-text-express-v1::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "15b39c8b-8e85-42c9-9623-06720301bda3",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "37396f55-b721-4bae-9461-4c369f5a080d",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "cohere.command-text-v14::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "22476490-a0d6-42db-b5ea-32d0b8a7f751",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "meta.llama2-13b-chat-v1::Invalid Token": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "22476490-a0d6-42db-b5ea-32d0b8a7f751",
+            "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/",
+        },
+        403,
+        {"message": "The security token included in the request is invalid."},
+    ],
+    "amazon.titan-text-express-v1::Malformed Body": [
+        {"Content-Type": "application/json", "x-amzn-RequestId": "81508a1c-33a8-4294-8743-f0c629af2f49"},
+        200,
+        {
+            "inputTextTokenCount": 12,
+            "results": [
+                {
+                    "tokenCount": 32,
+                    "outputText": "\n1 degree Fahrenheit is 0.56 Celsius. 
Therefore, 212 degree Fahrenheit in Celsius would be 115.42.", + "completionReason": "FINISH", + } + ], + }, + ], + "amazon.titan-embed-g1-text-02::Malformed Body": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "b10ac895-eae3-4f07-b926-10b2866c55ed"}, + 200, + { + "embedding": [ + -0.14160156, + 0.034423828, + 0.54296875, + 0.10986328, + 0.053466797, + 0.3515625, + 0.12988281, + -0.0002708435, + -0.21484375, + 0.060302734, + 0.58984375, + -0.5859375, + 0.52734375, + 0.82421875, + -0.91015625, + -0.19628906, + 0.45703125, + 0.609375, + -0.67578125, + 0.39453125, + -0.46875, + -0.25390625, + -0.21191406, + 0.114746094, + 0.31640625, + -0.41015625, + -0.32617188, + -0.43554688, + 0.4765625, + -0.4921875, + 0.40429688, + 0.06542969, + 0.859375, + -0.008056641, + -0.19921875, + 0.072753906, + 0.33203125, + 0.69921875, + 0.39453125, + 0.15527344, + 0.08886719, + -0.25, + 0.859375, + 0.22949219, + -0.19042969, + 0.13769531, + -0.078125, + 0.41210938, + 0.875, + 0.5234375, + 0.59765625, + -0.22949219, + -0.22558594, + -0.47460938, + 0.37695312, + 0.51953125, + -0.5703125, + 0.46679688, + 0.43554688, + 0.17480469, + -0.080566406, + -0.16699219, + -0.734375, + -1.0625, + -0.33984375, + 0.390625, + -0.18847656, + -0.5234375, + -0.48828125, + 0.44921875, + -0.09814453, + -0.3359375, + 0.087402344, + 0.36914062, + 1.3203125, + 0.25585938, + 0.14746094, + -0.059570312, + -0.15820312, + -0.037353516, + -0.61328125, + -0.6484375, + -0.35351562, + 0.55078125, + -0.26953125, + 0.90234375, + 0.3671875, + 0.31054688, + 0.00014019012, + -0.171875, + 0.025512695, + 0.5078125, + 0.11621094, + 0.33203125, + 0.8125, + -0.3046875, + -1.078125, + -0.5703125, + 0.26171875, + -0.4609375, + 0.203125, + 0.44726562, + -0.5078125, + 0.41601562, + -0.1953125, + 0.028930664, + -0.57421875, + 0.2265625, + 0.13574219, + -0.040039062, + -0.22949219, + -0.515625, + -0.19042969, + -0.30078125, + 0.10058594, + -0.66796875, + 0.6015625, + 0.296875, + -0.765625, + -0.87109375, + 0.2265625, + 0.068847656, + -0.088378906, + -0.1328125, + -0.796875, + -0.37304688, + 0.47460938, + -0.3515625, + -0.8125, + -0.32226562, + 0.265625, + 0.3203125, + -0.4140625, + -0.49023438, + 0.859375, + -0.19140625, + -0.6328125, + 0.10546875, + -0.5625, + 0.66015625, + 0.26171875, + -0.2109375, + 0.421875, + -0.82421875, + 0.29296875, + 0.17773438, + 0.24023438, + 0.5078125, + -0.49804688, + -0.10205078, + 0.10498047, + -0.36132812, + -0.47460938, + -0.20996094, + 0.010070801, + -0.546875, + 0.66796875, + -0.123046875, + -0.75390625, + 0.19628906, + 0.17480469, + 0.18261719, + -0.96875, + -0.26171875, + 0.4921875, + -0.40039062, + 0.296875, + 0.1640625, + -0.20507812, + -0.36132812, + 0.76171875, + -1.234375, + -0.625, + 0.060058594, + -0.09375, + -0.14746094, + 1.09375, + 0.057861328, + 0.22460938, + -0.703125, + 0.07470703, + 0.23828125, + -0.083984375, + -0.54296875, + 0.5546875, + -0.5, + -0.390625, + 0.106933594, + 0.6640625, + 0.27734375, + -0.953125, + 0.35351562, + -0.7734375, + -0.77734375, + 0.16503906, + -0.42382812, + 0.36914062, + 0.020141602, + -1.3515625, + 0.18847656, + 0.13476562, + -0.034179688, + -0.03930664, + -0.03857422, + -0.027954102, + 0.73828125, + -0.18945312, + -0.09814453, + -0.46289062, + 0.36914062, + 0.033203125, + 0.020874023, + -0.703125, + 0.91796875, + 0.38671875, + 0.625, + -0.19335938, + -0.16796875, + -0.58203125, + 0.21386719, + -0.032470703, + -0.296875, + -0.15625, + -0.1640625, + -0.74609375, + 0.328125, + 0.5546875, + -0.1953125, + 1.0546875, + 0.171875, + -0.099609375, + 0.5234375, + 
0.05078125, + -0.35742188, + -0.2734375, + -1.3203125, + -0.8515625, + -0.16015625, + 0.01574707, + 0.29296875, + 0.18457031, + -0.265625, + 0.048339844, + 0.045654297, + -0.32226562, + 0.087890625, + -0.0047302246, + 0.38671875, + 0.10644531, + -0.06225586, + 1.03125, + 0.94140625, + -0.3203125, + 0.20800781, + -1.171875, + 0.48046875, + -0.091796875, + 0.20800781, + -0.1328125, + -0.20507812, + 0.28125, + -0.47070312, + -0.09033203, + 0.0013809204, + -0.08203125, + 0.43359375, + -0.03100586, + -0.060791016, + -0.53515625, + -1.46875, + 0.000101566315, + 0.515625, + 0.40625, + -0.10498047, + -0.15820312, + -0.009460449, + -0.77734375, + -0.5859375, + 0.9765625, + 0.099609375, + 0.51953125, + 0.38085938, + -0.09667969, + -0.100097656, + -0.5, + -1.3125, + -0.18066406, + -0.099121094, + 0.26171875, + -0.14453125, + -0.546875, + 0.17578125, + 0.484375, + 0.765625, + 0.45703125, + 0.2734375, + 0.0028076172, + 0.17089844, + -0.32421875, + -0.37695312, + 0.30664062, + -0.48046875, + 0.07128906, + 0.031982422, + -0.31054688, + -0.055419922, + -0.29296875, + 0.3359375, + -0.296875, + 0.47851562, + -0.05126953, + 0.18457031, + -0.01953125, + -0.35742188, + 0.017944336, + -0.25, + 0.10595703, + 0.17382812, + -0.73828125, + 0.36914062, + -0.15234375, + -0.8125, + 0.17382812, + 0.048095703, + 0.5625, + -0.33789062, + 0.023071289, + -0.21972656, + 0.16015625, + 0.032958984, + -1.1171875, + -0.984375, + 0.83984375, + 0.009033203, + -0.042236328, + -0.46484375, + -0.08203125, + 0.44726562, + -0.765625, + -0.3984375, + -0.40820312, + -0.234375, + 0.044189453, + 0.119628906, + -0.7578125, + -0.55078125, + -0.4453125, + 0.7578125, + 0.34960938, + 0.96484375, + 0.35742188, + 0.36914062, + -0.35351562, + -0.36132812, + 1.109375, + 0.5859375, + 0.85546875, + -0.10644531, + -0.6953125, + -0.0066833496, + 0.042236328, + -0.06689453, + 0.36914062, + 0.9765625, + -0.3046875, + 0.59765625, + -0.6640625, + 0.21484375, + -0.07128906, + 1.1328125, + -0.51953125, + 0.86328125, + -0.11328125, + 0.15722656, + -0.36328125, + -0.04638672, + 1.4375, + 0.18457031, + -0.18359375, + 0.10595703, + -0.49023438, + -0.07324219, + -0.73046875, + -0.119140625, + 0.021118164, + 0.4921875, + -0.46875, + 0.28710938, + 0.3359375, + 0.11767578, + -0.2109375, + -0.14550781, + 0.39648438, + -0.27734375, + 0.48046875, + 0.12988281, + 0.45507812, + -0.375, + -0.84765625, + 0.25585938, + -0.36523438, + 0.8046875, + 0.42382812, + -0.24511719, + 0.54296875, + 0.71875, + 0.010009766, + -0.04296875, + 0.083984375, + -0.52734375, + 0.13964844, + -0.27539062, + -0.30273438, + 1.1484375, + -0.515625, + -0.19335938, + 0.58984375, + 0.049072266, + 0.703125, + -0.04272461, + 0.5078125, + 0.34960938, + -0.3359375, + -0.47460938, + 0.049316406, + 0.36523438, + 0.7578125, + -0.022827148, + -0.71484375, + 0.21972656, + 0.09716797, + -0.203125, + -0.36914062, + 1.34375, + 0.34179688, + 0.46679688, + 1.078125, + 0.26171875, + 0.41992188, + 0.22363281, + -0.515625, + -0.5703125, + 0.13378906, + 0.26757812, + -0.22558594, + -0.5234375, + 0.06689453, + 0.08251953, + -0.625, + 0.16796875, + 0.43164062, + -0.55859375, + 0.28125, + 0.078125, + 0.6328125, + 0.23242188, + -0.064941406, + -0.004486084, + -0.20703125, + 0.2734375, + 0.453125, + -0.734375, + 0.04272461, + 0.36132812, + -0.19628906, + -0.12402344, + 1.3515625, + 0.25585938, + 0.4921875, + -0.29296875, + -0.58984375, + 0.021240234, + -0.044677734, + 0.7578125, + -0.7890625, + 0.10253906, + -0.15820312, + -0.5078125, + -0.39453125, + -0.453125, + 0.35742188, + 0.921875, + 0.44335938, + -0.49804688, + 
0.44335938, + 0.31445312, + 0.58984375, + -1.0078125, + -0.22460938, + 0.24121094, + 0.87890625, + 0.66015625, + -0.390625, + -0.05053711, + 0.059570312, + 0.36132812, + -0.00038719177, + -0.017089844, + 0.62890625, + 0.203125, + 0.17480469, + 0.025512695, + 0.47460938, + 0.3125, + 1.140625, + 0.32421875, + -0.057861328, + 0.36914062, + -0.7265625, + -0.51953125, + 0.26953125, + 0.42773438, + 0.064453125, + 0.6328125, + 0.27148438, + -0.11767578, + 0.66796875, + -0.38671875, + 0.5234375, + -0.59375, + 0.5078125, + 0.008239746, + -0.34179688, + -0.27539062, + 0.5234375, + 1.296875, + 0.29492188, + -0.010986328, + -0.41210938, + 0.59375, + 0.061767578, + -0.33398438, + -2.03125, + 0.87890625, + -0.010620117, + 0.53125, + 0.14257812, + -0.515625, + -1.03125, + 0.578125, + 0.1875, + 0.44335938, + -0.33203125, + -0.36328125, + -0.3203125, + 0.29296875, + -0.8203125, + 0.41015625, + -0.48242188, + 0.66015625, + 0.5625, + -0.16503906, + -0.54296875, + -0.38085938, + 0.26171875, + 0.62109375, + 0.29101562, + -0.31054688, + 0.23730469, + -0.8515625, + 0.5234375, + 0.15332031, + 0.52734375, + -0.079589844, + -0.080566406, + -0.15527344, + -0.022827148, + 0.030517578, + -0.1640625, + -0.421875, + 0.09716797, + 0.03930664, + -0.055908203, + -0.546875, + -0.47851562, + 0.091796875, + 0.32226562, + -0.94140625, + -0.04638672, + -1.203125, + -0.39648438, + 0.45507812, + 0.296875, + -0.45703125, + 0.37890625, + -0.122558594, + 0.28320312, + -0.01965332, + -0.11669922, + -0.34570312, + -0.53515625, + -0.091308594, + -0.9375, + -0.32617188, + 0.095214844, + -0.4765625, + 0.37890625, + -0.859375, + 1.1015625, + -0.08935547, + 0.46484375, + -0.19238281, + 0.7109375, + 0.040039062, + -0.5390625, + 0.22363281, + -0.70703125, + 0.4921875, + -0.119140625, + -0.26757812, + -0.08496094, + 0.0859375, + -0.00390625, + -0.013366699, + -0.03955078, + 0.07421875, + -0.13085938, + 0.29101562, + -0.12109375, + 0.45703125, + 0.021728516, + 0.38671875, + -0.3671875, + -0.52734375, + -0.115722656, + 0.125, + 0.5703125, + -1.234375, + 0.06298828, + -0.55859375, + 0.60546875, + 0.8125, + -0.0032958984, + -0.068359375, + -0.21191406, + 0.56640625, + 0.17285156, + -0.3515625, + 0.36328125, + -0.99609375, + 0.43554688, + -0.1015625, + 0.07080078, + -0.66796875, + 1.359375, + 0.41601562, + 0.15917969, + 0.17773438, + -0.28710938, + 0.021850586, + -0.46289062, + 0.17578125, + -0.03955078, + -0.026855469, + 0.5078125, + -0.65625, + 0.0012512207, + 0.044433594, + -0.18652344, + 0.4921875, + -0.75390625, + 0.0072021484, + 0.4375, + -0.31445312, + 0.20214844, + 0.15039062, + -0.63671875, + -0.296875, + -0.375, + -0.027709961, + 0.013427734, + 0.17089844, + 0.89453125, + 0.11621094, + -0.43945312, + -0.30859375, + 0.02709961, + 0.23242188, + -0.64453125, + -0.859375, + 0.22167969, + -0.023071289, + -0.052734375, + 0.3671875, + -0.18359375, + 0.81640625, + -0.11816406, + 0.028320312, + 0.19042969, + 0.012817383, + -0.43164062, + 0.55859375, + -0.27929688, + 0.14257812, + -0.140625, + -0.048583984, + -0.014526367, + 0.35742188, + 0.22753906, + 0.13183594, + 0.04638672, + 0.03930664, + -0.29296875, + -0.2109375, + -0.16308594, + -0.48046875, + -0.13378906, + -0.39257812, + 0.29296875, + -0.047851562, + -0.5546875, + 0.08300781, + -0.14941406, + -0.07080078, + 0.12451172, + 0.1953125, + -0.51171875, + -0.048095703, + 0.1953125, + -0.37695312, + 0.46875, + -0.084472656, + 0.19042969, + -0.39453125, + 0.69921875, + -0.0065307617, + 0.25390625, + -0.16992188, + -0.5078125, + 0.016845703, + 0.27929688, + -0.22070312, + 0.671875, + 0.18652344, + 
0.25, + -0.046875, + -0.012023926, + -0.36523438, + 0.36523438, + -0.11279297, + 0.421875, + 0.079589844, + -0.100097656, + 0.37304688, + 0.29882812, + -0.10546875, + -0.36523438, + 0.040039062, + 0.546875, + 0.12890625, + -0.06542969, + -0.38085938, + -0.35742188, + -0.6484375, + -0.28515625, + 0.0107421875, + -0.055664062, + 0.45703125, + 0.33984375, + 0.26367188, + -0.23144531, + 0.012878418, + -0.875, + 0.11035156, + 0.33984375, + 0.203125, + 0.38867188, + 0.24902344, + -0.37304688, + -0.98046875, + -0.122558594, + -0.17871094, + -0.09277344, + 0.1796875, + 0.4453125, + -0.66796875, + 0.78515625, + 0.12988281, + 0.35546875, + 0.44140625, + 0.58984375, + 0.29492188, + 0.7734375, + -0.21972656, + -0.40234375, + -0.22265625, + 0.18359375, + 0.54296875, + 0.17382812, + 0.59375, + -0.390625, + -0.92578125, + -0.017456055, + -0.25, + 0.73828125, + 0.7578125, + -0.3828125, + -0.25976562, + 0.049072266, + 0.046875, + -0.3515625, + 0.30078125, + -1.03125, + -0.48828125, + 0.0017929077, + -0.26171875, + 0.20214844, + 0.29882812, + 0.064941406, + 0.21484375, + -0.55078125, + -0.021362305, + 0.12988281, + 0.27148438, + 0.38867188, + -0.19726562, + -0.55078125, + 0.1640625, + 0.32226562, + -0.72265625, + 0.36132812, + 1.21875, + -0.22070312, + -0.32421875, + -0.29882812, + 0.0024414062, + 0.19921875, + 0.734375, + 0.16210938, + 0.17871094, + -0.19140625, + 0.38476562, + -0.06591797, + -0.47070312, + -0.040039062, + -0.33007812, + -0.07910156, + -0.2890625, + 0.00970459, + 0.12695312, + -0.12060547, + -0.18847656, + 1.015625, + -0.032958984, + 0.12451172, + -0.38476562, + 0.063964844, + 1.0859375, + 0.067871094, + -0.24511719, + 0.125, + 0.10546875, + -0.22460938, + -0.29101562, + 0.24414062, + -0.017944336, + -0.15625, + -0.60546875, + -0.25195312, + -0.46875, + 0.80859375, + -0.34960938, + 0.42382812, + 0.796875, + 0.296875, + -0.067871094, + 0.39453125, + 0.07470703, + 0.033935547, + 0.24414062, + 0.32617188, + 0.023925781, + 0.73046875, + 0.2109375, + -0.43164062, + 0.14453125, + 0.63671875, + 0.21972656, + -0.1875, + -0.18066406, + -0.22167969, + -1.3359375, + 0.52734375, + -0.40625, + -0.12988281, + 0.17480469, + -0.18066406, + 0.58984375, + -0.32421875, + -0.13476562, + 0.39257812, + -0.19238281, + 0.068359375, + 0.7265625, + -0.7109375, + -0.125, + 0.328125, + 0.34179688, + -0.48828125, + -0.10058594, + -0.83984375, + 0.30273438, + 0.008239746, + -1.390625, + 0.171875, + 0.34960938, + 0.44921875, + 0.22167969, + 0.60546875, + -0.36914062, + -0.028808594, + -0.19921875, + 0.6875, + 0.52734375, + -0.07421875, + 0.35546875, + 0.546875, + 0.08691406, + 0.23339844, + -0.984375, + -0.20507812, + 0.08544922, + 0.453125, + -0.07421875, + -0.953125, + 0.74609375, + -0.796875, + 0.47851562, + 0.81640625, + -0.44921875, + -0.33398438, + -0.54296875, + 0.46484375, + -0.390625, + -0.24121094, + -0.0115356445, + 1.1328125, + 1.0390625, + 0.6484375, + 0.35742188, + -0.29492188, + -0.0007095337, + -0.060302734, + 0.21777344, + 0.15136719, + -0.6171875, + 0.11328125, + -0.025878906, + 0.19238281, + 0.140625, + 0.171875, + 0.25195312, + 0.10546875, + 0.0008354187, + -0.13476562, + -0.26953125, + 0.025024414, + -0.28320312, + -0.107910156, + 1.015625, + 0.05493164, + -0.12988281, + 0.30859375, + 0.22558594, + -0.60546875, + 0.11328125, + -1.203125, + 0.6484375, + 0.087402344, + 0.32226562, + 0.63671875, + -0.07714844, + -1.390625, + -0.71875, + -0.34179688, + -0.10546875, + -0.37304688, + -0.09863281, + -0.41210938, + -0.14941406, + 0.41210938, + -0.20898438, + 0.18261719, + 0.67578125, + 0.41601562, + 
0.32617188, + 0.2421875, + -0.14257812, + -0.6796875, + 0.01953125, + 0.34179688, + 0.20800781, + -0.123046875, + 0.087402344, + 0.85546875, + 0.33984375, + 0.33203125, + -0.68359375, + 0.44921875, + 0.50390625, + 0.083496094, + 0.10888672, + -0.09863281, + 0.55078125, + 0.09765625, + -0.50390625, + 0.13378906, + -0.29882812, + 0.030761719, + -0.64453125, + 0.22949219, + 0.43945312, + 0.16503906, + 0.10888672, + -0.12792969, + -0.039794922, + -0.111328125, + -0.35742188, + 0.053222656, + -0.78125, + -0.4375, + 0.359375, + -0.88671875, + -0.21972656, + -0.053710938, + 0.91796875, + -0.10644531, + 0.55859375, + -0.7734375, + 0.5078125, + 0.46484375, + 0.32226562, + 0.16796875, + -0.28515625, + 0.045410156, + -0.45117188, + 0.38867188, + -0.33398438, + -0.5234375, + 0.296875, + 0.6015625, + 0.3515625, + -0.734375, + 0.3984375, + -0.08251953, + 0.359375, + -0.28515625, + -0.88671875, + 0.0051879883, + 0.045166016, + -0.7421875, + -0.36523438, + 0.140625, + 0.18066406, + -0.171875, + -0.15625, + -0.53515625, + 0.2421875, + -0.19140625, + -0.18066406, + 0.25390625, + 0.6875, + -0.01965332, + -0.33203125, + 0.29492188, + 0.107421875, + -0.048339844, + -0.82421875, + 0.52734375, + 0.78125, + 0.8203125, + -0.90625, + 0.765625, + 0.0390625, + 0.045410156, + 0.26367188, + -0.14355469, + -0.26367188, + 0.390625, + -0.10888672, + 0.33007812, + -0.5625, + 0.08105469, + -0.13769531, + 0.8515625, + -0.14453125, + 0.77734375, + -0.48046875, + -0.3515625, + -0.25390625, + -0.09277344, + 0.23925781, + -0.022338867, + -0.45898438, + 0.36132812, + -0.23828125, + 0.265625, + -0.48632812, + -0.46875, + -0.75390625, + 1.3125, + 0.78125, + -0.63671875, + -1.21875, + 0.5078125, + -0.27734375, + -0.118652344, + 0.041992188, + -0.14648438, + -0.8046875, + 0.21679688, + -0.79296875, + 0.28320312, + -0.09667969, + 0.42773438, + 0.49414062, + 0.44726562, + 0.21972656, + -0.02746582, + -0.03540039, + -0.14941406, + -0.515625, + -0.27929688, + 0.9609375, + -0.007598877, + 0.34765625, + -0.060546875, + -0.44726562, + 0.7421875, + 0.15332031, + 0.45117188, + -0.4921875, + 0.07080078, + 0.5625, + 0.3984375, + -0.20019531, + 0.014892578, + 0.63671875, + -0.0071411133, + 0.016357422, + 1.0625, + 0.049316406, + 0.18066406, + 0.09814453, + -0.52734375, + -0.359375, + -0.072265625, + -0.41992188, + 0.39648438, + 0.38671875, + -0.30273438, + -0.056640625, + -0.640625, + -0.44921875, + 0.49414062, + 0.29101562, + 0.49609375, + 0.40429688, + -0.10205078, + 0.49414062, + -0.28125, + -0.12695312, + -0.0022735596, + -0.37304688, + 0.122558594, + 0.07519531, + -0.12597656, + -0.38085938, + -0.19824219, + -0.40039062, + 0.56640625, + -1.140625, + -0.515625, + -0.17578125, + -0.765625, + -0.43945312, + 0.3359375, + -0.24707031, + 0.32617188, + -0.45117188, + -0.37109375, + 0.45117188, + -0.27539062, + -0.38867188, + 0.09082031, + 0.17675781, + 0.49414062, + 0.19921875, + 0.17480469, + 0.8515625, + -0.23046875, + -0.234375, + -0.28515625, + 0.10253906, + 0.29101562, + -0.3359375, + -0.203125, + 0.6484375, + 0.11767578, + -0.20214844, + -0.42382812, + 0.26367188, + 0.6328125, + 0.0059509277, + 0.08691406, + -1.5625, + -0.43554688, + 0.17675781, + 0.091796875, + -0.5234375, + -0.09863281, + 0.20605469, + 0.16601562, + -0.578125, + 0.017700195, + 0.41015625, + 1.03125, + -0.55078125, + 0.21289062, + -0.35351562, + 0.24316406, + -0.123535156, + 0.11035156, + -0.48242188, + -0.34179688, + 0.45117188, + 0.3125, + -0.071777344, + 0.12792969, + 0.55859375, + 0.063964844, + -0.21191406, + 0.01965332, + -1.359375, + -0.21582031, + -0.019042969, + 
0.16308594, + -0.3671875, + -0.40625, + -1.0234375, + -0.21289062, + 0.24023438, + -0.28125, + 0.26953125, + -0.14550781, + -0.087890625, + 0.16113281, + -0.49804688, + -0.17675781, + -0.890625, + 0.27929688, + 0.484375, + 0.27148438, + 0.11816406, + 0.83984375, + 0.029052734, + -0.890625, + 0.66796875, + 0.78515625, + -0.953125, + 0.49414062, + -0.546875, + 0.106933594, + -0.08251953, + 0.2890625, + -0.1484375, + -0.85546875, + 0.32421875, + -0.0040893555, + -0.16601562, + -0.16699219, + 0.24414062, + -0.5078125, + 0.25390625, + -0.10253906, + 0.15625, + 0.140625, + -0.27539062, + -0.546875, + -0.5546875, + -0.71875, + 0.37304688, + 0.060058594, + -0.076171875, + 0.44921875, + 0.06933594, + -0.28710938, + -0.22949219, + 0.17578125, + 0.09814453, + 0.4765625, + -0.95703125, + -0.03540039, + 0.21289062, + -0.7578125, + -0.07373047, + 0.10546875, + 0.07128906, + 0.76171875, + 0.4296875, + -0.09375, + 0.27539062, + -0.55078125, + 0.29882812, + -0.42382812, + 0.32617188, + -0.39648438, + 0.12451172, + 0.16503906, + -0.22460938, + -0.65625, + -0.022094727, + 0.61328125, + -0.024780273, + 0.62109375, + -0.033447266, + 0.515625, + 0.12890625, + -0.21875, + -0.08642578, + 0.49804688, + -0.2265625, + -0.29296875, + 0.19238281, + 0.3515625, + -1.265625, + 0.57421875, + 0.20117188, + -0.28320312, + 0.1953125, + -0.30664062, + 0.2265625, + -0.11230469, + 0.83984375, + 0.111328125, + 0.265625, + 0.71484375, + -0.625, + 0.38867188, + 0.47070312, + -0.32617188, + -0.171875, + 1.0078125, + 0.19726562, + -0.118652344, + 0.63671875, + -0.068359375, + -0.25585938, + 0.4140625, + -0.29296875, + 0.21386719, + -0.064453125, + 0.15820312, + -0.89453125, + -0.16308594, + 0.48046875, + 0.14648438, + -0.5703125, + 0.84765625, + -0.19042969, + 0.03515625, + 0.42578125, + -0.27539062, + -0.5390625, + 0.95703125, + 0.2734375, + 0.16699219, + -0.328125, + 0.11279297, + 0.003250122, + 0.47265625, + -0.31640625, + 0.546875, + 0.55859375, + 0.06933594, + -0.61328125, + -0.16210938, + -0.375, + 0.100097656, + -0.088378906, + 0.12695312, + 0.079589844, + 0.123535156, + -1.0078125, + 0.6875, + 0.022949219, + -0.40039062, + -0.09863281, + 0.29101562, + -1.2890625, + -0.20996094, + 0.36328125, + -0.3515625, + 0.7890625, + 0.12207031, + 0.48046875, + -0.13671875, + -0.041015625, + 0.19824219, + 0.19921875, + 0.01171875, + -0.37695312, + -0.62890625, + 0.9375, + -0.671875, + 0.24609375, + 0.6484375, + -0.29101562, + 0.076171875, + 0.62109375, + -0.5546875, + 0.36523438, + 0.75390625, + -0.19140625, + -0.875, + -0.8203125, + -0.24414062, + -0.625, + 0.1796875, + -0.40039062, + 0.25390625, + -0.14550781, + -0.21679688, + -0.828125, + 0.3359375, + 0.43554688, + 0.55078125, + -0.44921875, + -0.28710938, + 0.24023438, + 0.18066406, + -0.6953125, + 0.020385742, + -0.11376953, + 0.13867188, + -0.92578125, + 0.33398438, + -0.328125, + 0.78125, + -0.45507812, + -0.07470703, + 0.34179688, + 0.07080078, + 0.76171875, + 0.37890625, + -0.10644531, + 0.90234375, + -0.21875, + -0.15917969, + -0.36132812, + 0.2109375, + -0.45703125, + -0.76953125, + 0.21289062, + 0.26367188, + 0.49804688, + 0.35742188, + -0.20019531, + 0.31054688, + 0.34179688, + 0.17089844, + -0.15429688, + 0.39648438, + -0.5859375, + 0.20996094, + -0.40039062, + 0.5703125, + -0.515625, + 0.5234375, + 0.049560547, + 0.328125, + 0.24804688, + 0.42578125, + 0.609375, + 0.19238281, + 0.27929688, + 0.19335938, + 0.78125, + -0.9921875, + 0.23925781, + -1.3828125, + -0.22949219, + -0.578125, + -0.13964844, + -0.17382812, + -0.011169434, + 0.26171875, + -0.73046875, + -1.4375, + 
+                0.6953125,
+                -0.7421875,
+                0.052246094,
+                0.12207031,
+                1.3046875,
+                0.38867188,
+                0.040283203,
+                -0.546875,
+                -0.0021514893,
+                0.18457031,
+                -0.5546875,
+                -0.51171875,
+                -0.16308594,
+                -0.104003906,
+                -0.38867188,
+                -0.20996094,
+                -0.8984375,
+                0.6015625,
+                -0.30078125,
+                -0.13769531,
+                0.16113281,
+                0.58203125,
+                -0.23730469,
+                -0.125,
+                -1.0234375,
+                0.875,
+                -0.7109375,
+                0.29101562,
+                0.09667969,
+                -0.3203125,
+                -0.48046875,
+                0.37890625,
+                0.734375,
+                -0.28710938,
+                -0.29882812,
+                -0.05493164,
+                0.34765625,
+                -0.84375,
+                0.65625,
+                0.578125,
+                -0.20019531,
+                0.13769531,
+                0.10058594,
+                -0.37109375,
+                0.36523438,
+                -0.22167969,
+                0.72265625,
+            ],
+            "inputTextTokenCount": 6,
+        },
+    ],
+    "amazon.titan-text-express-v1::{ Malformed Request Body": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "e72d1b46-9f16-4bf0-8eee-f7778f32e5a5",
+            "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/",
+        },
+        400,
+        {"message": "Malformed input request, please reformat your input and try again."},
+    ],
+    "amazon.titan-embed-g1-text-02::{ Malformed Request Body": [
+        {
+            "Content-Type": "application/json",
+            "x-amzn-RequestId": "b3646569-18c5-4173-a9fa-bbe9c648f636",
+            "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/",
+        },
+        400,
+        {"message": "Malformed input request, please reformat your input and try again."},
+    ],
+}
+
+
+# Extracts the model ID from request paths such as /model/<model-id>/invoke
+# or /model/<model-id>/invoke-with-response-stream.
+MODEL_PATH_RE = re.compile(r"/model/([^/]+)/invoke")
+
+
+def simple_get(self):
+    # Request handler for MockExternalHTTPServer that replays the canned
+    # Bedrock responses above, keyed by "<model>::<prompt prefix>".
+    content_len = int(self.headers.get("content-length"))
+    body = self.rfile.read(content_len).decode("utf-8")
+    try:
+        content = json.loads(body)
+    except Exception:
+        content = body
+
+    stream = self.path.endswith("invoke-with-response-stream")
+    model = MODEL_PATH_RE.match(self.path).group(1)
+    prompt = extract_shortened_prompt(content, model)
+    if not prompt:
+        self.send_response(500)
+        self.end_headers()
+        self.wfile.write("Could not parse prompt.".encode("utf-8"))
+        return
+
+    headers, status_code, response = ({}, 0, "")
+    if stream:
+        for k, v in STREAMED_RESPONSES.items():
+            if prompt.startswith(k):
+                headers, status_code, response = v
+                break
+        if not response:
+            for k, v in RESPONSES.items():
+                # Only look for error responses returned immediately instead of in a stream
+                if prompt.startswith(k) and v[1] >= 400:
+                    headers, status_code, response = v
+                    stream = False  # Response will not be streamed
+                    break
+    else:
+        for k, v in RESPONSES.items():
+            if prompt.startswith(k):
+                headers, status_code, response = v
+                break
+
+    if not response:
+        # No response fixture matched the prompt.
+        self.send_response(500)
+        self.end_headers()
+        self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8"))
+        return
+
+    if stream:
+        # Send response code
+        self.send_response(status_code)
+
+        # Send headers
+        for k, v in headers.items():
+            self.send_header(k, v)
+        self.end_headers()
+
+        # Send response body as a sequence of hex-encoded event stream chunks
+        for resp in response:
+            self.wfile.write(bytes.fromhex(resp))
+    else:
+        # Send response code
+        self.send_response(status_code)
+
+        # Send headers
+        for k, v in headers.items():
+            self.send_header(k, v)
+        self.end_headers()
+
+        # Send response body
+        response_body = json.dumps(response).encode("utf-8")
+
+        if "Malformed Body" in prompt:
+            # Remove end of response to make invalid JSON
+            response_body = response_body[:-4]
+
+        self.wfile.write(response_body)
+    return
+
+
+def extract_shortened_prompt(content, model):
+    if isinstance(content, str):
+        prompt = content
+    elif "messages" in content:
+        prompt = content["messages"][0].get("content")
+    else:
+        prompt = content.get("inputText", "") or content.get("prompt", "") or content.get("texts", [""])[0]
+    # Sometimes there is leading whitespace in the prompt.
+    prompt = prompt.strip()
+    prompt = "::".join((model, prompt))  # Prepend model name to prompt key to keep separate copies
+    return prompt.lstrip().split("\n")[0]
+
+
+class MockExternalBedrockServer(MockExternalHTTPServer):
+    # To use this class in a test, start and stop this server before and
+    # after making requests to the test app that makes the external calls.
+
+    def __init__(self, handler=simple_get, port=None, *args, **kwargs):
+        super(MockExternalBedrockServer, self).__init__(handler=handler, port=port, *args, **kwargs)
+
+
+if __name__ == "__main__":
+    # Use this to sort dict for easier future incremental updates
+    print("RESPONSES = %s" % dict(sorted(RESPONSES.items(), key=lambda i: (i[1][1], i[0]))))
+
+    with MockExternalBedrockServer() as server:
+        print("MockExternalBedrockServer serving on port %s" % str(server.port))
+        while True:
+            pass  # Serve forever
diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py
new file mode 100644
index 000000000..f0f40a799
--- /dev/null
+++ b/tests/external_botocore/_test_bedrock_chat_completion.py
@@ -0,0 +1,1310 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +chat_completion_payload_templates = { + "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', + "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', + "anthropic.claude-instant-v1": '{"prompt": "Human: %s Assistant:", "temperature": %f, "max_tokens_to_sample": %d}', + "cohere.command-text-v14": '{"prompt": "%s", "temperature": %f, "max_tokens": %d}', + "meta.llama2-13b-chat-v1": '{"prompt": "%s", "temperature": %f, "max_gen_len": %d}', +} + +chat_completion_expected_events = { + "amazon.titan-text-express-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "FINISH", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", + "span_id": None, + "trace_id": "trace-id", + "content": "\n1 degree Fahrenheit is 0.56 Celsius. 
Therefore, 212 degree Fahrenheit in Celsius would be 115.42.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "ai21.j2-mid-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "228ee63f-4eca-4b7d-b679-bc920de63525", + "response_id": "1234", + "duration": None, # Response time varies each test run + "request.model": "ai21.j2-mid-v1", + "response.model": "ai21.j2-mid-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "endoftext", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "1234-0", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "228ee63f-4eca-4b7d-b679-bc920de63525", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "ai21.j2-mid-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "1234-1", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "228ee63f-4eca-4b7d-b679-bc920de63525", + "span_id": None, + "trace_id": "trace-id", + "content": "\n212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "ai21.j2-mid-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "anthropic.claude-instant-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "6a886158-b39f-46ce-b214-97458ab76f2f", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-instant-v1", + "response.model": "anthropic.claude-instant-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "max_tokens", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "6a886158-b39f-46ce-b214-97458ab76f2f", + "span_id": None, + "trace_id": "trace-id", + "content": "Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "6a886158-b39f-46ce-b214-97458ab76f2f", + "span_id": None, + "trace_id": "trace-id", + "content": " Okay, here are the steps to convert 212 degrees Fahrenheit to Celsius:\n\n1) The formula to convert between Fahrenheit and Celsius is:\n C = (F - 32) * 5/9\n\n2) Plug in 212 degrees Fahrenheit for F:\n C = (212 - 32) * 5/9\n C = 180 * 5/9\n C = 100\n\n3) Therefore, 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "cohere.command-text-v14": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "12912a17-aa13-45f3-914c-cc82166f3601", + "response_id": None, # UUID that varies with each run + "duration": None, # Response time varies each test run + "request.model": "cohere.command-text-v14", + "response.model": "cohere.command-text-v14", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "MAX_TOKENS", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "12912a17-aa13-45f3-914c-cc82166f3601", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "12912a17-aa13-45f3-914c-cc82166f3601", + "span_id": None, + "trace_id": "trace-id", + "content": " To convert from Fahrenheit to Celsius, you can use the following formula:\n\nCelsius = (Fahrenheit - 32) * 5/9\n\nIn this case, 212 degrees Fahrenheit is converted to Celsius as follows:\n\nCelsius = (212 - 32) * 5/9 = (180) * 5/9 = (180/9) = 20 degrees Celsius\n\nTherefore, 212 degrees Fahrenheit is equivalent to 20 degrees Celsius.\n\nIt's important to note that", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "meta.llama2-13b-chat-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "a168214d-742d-4244-bd7f-62214ffa07df", + "duration": None, # Response time varies each test run + "request.model": "meta.llama2-13b-chat-v1", + "response.model": "meta.llama2-13b-chat-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + 
"response.choices.finish_reason": "stop", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "a168214d-742d-4244-bd7f-62214ffa07df", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "a168214d-742d-4244-bd7f-62214ffa07df", + "span_id": None, + "trace_id": "trace-id", + "content": "\n\n212°F = ?°C\n\nPlease help! I'm stuck!\n\nThank you!\n\nI hope this is the correct place to ask this question. Please let me know if it isn't.\n\nI appreciate your help!\n\nBest regards,\n\n[Your Name]", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], +} +chat_completion_langchain_expected_streaming_events = { + "amazon.titan-text-express-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "response.choices.finish_reason": "FINISH", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", + "span_id": None, + "trace_id": "trace-id", + "content": "\n\nUser: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:\n\nBot:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", + "span_id": None, + "trace_id": "trace-id", + "content": " Hello, how can I help you today?", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "anthropic.claude-instant-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-instant-v1", + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", + "span_id": None, + "trace_id": "trace-id", + "content": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", + "span_id": None, + "trace_id": "trace-id", + "content": "Hello! My name is Claude.\n\nH: Nice to meet you Claude. 
Can you tell me a bit about yourself?", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "meta.llama2-13b-chat-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", + "duration": None, # Response time varies each test run + "request.model": "meta.llama2-13b-chat-v1", + "response.model": "meta.llama2-13b-chat-v1", + "response.choices.finish_reason": "stop", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", + "span_id": None, + "trace_id": "trace-id", + "content": "[INST] The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI: [/INST]", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", + "span_id": None, + "trace_id": "trace-id", + "content": " Hello! It's great to talk to you! I'm an AI, and I'm here to help answer any questions you might have. What's on your mind? 🤔 Do you have a specific topic you'd like to discuss, or do you just want to chat? 💬 I'm all ears! 👂", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], +} +chat_completion_langchain_expected_events = { + "amazon.titan-text-express-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "response.choices.finish_reason": "FINISH", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", + "span_id": None, + "trace_id": "trace-id", + "content": "\n\nUser: The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:\n\nBot:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", + "span_id": None, + "trace_id": "trace-id", + "content": " Hello, how can I help you today?", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "anthropic.claude-instant-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-instant-v1", + "response.model": "anthropic.claude-instant-v1", + "response.choices.finish_reason": "end_turn", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", + "span_id": None, + "trace_id": "trace-id", + "content": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", + "span_id": None, + "trace_id": "trace-id", + "content": "[{'type': 'text', 'text': \"Hello! 
It's nice to meet you.\"}]", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "meta.llama2-13b-chat-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", + "duration": None, # Response time varies each test run + "request.model": "meta.llama2-13b-chat-v1", + "response.model": "meta.llama2-13b-chat-v1", + "response.choices.finish_reason": "stop", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", + "span_id": None, + "trace_id": "trace-id", + "content": "[INST] The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI: [/INST]", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", + "span_id": None, + "trace_id": "trace-id", + "content": " Hello! It's great to talk to you! I'm an AI, and I'm here to help answer any questions you might have. What's on your mind? 🤔 Do you have a specific topic you'd like to discuss, or is there something you'd like to know? 🤓 I'm all ears! 
👂", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], +} + +chat_completion_streaming_expected_events = { + "amazon.titan-text-express-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "b427270f-371a-458d-81b6-a05aafb2704c", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "FINISH", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "b427270f-371a-458d-81b6-a05aafb2704c", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "b427270f-371a-458d-81b6-a05aafb2704c", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": "\n1 degree Fahrenheit is 0.56 degrees Celsius. Therefore, 212 degree Fahrenheit in Celsius would be 115.72.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "anthropic.claude-instant-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "a645548f-0b3a-47ce-a675-f51e6e9037de", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-instant-v1", + "response.model": "anthropic.claude-instant-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "stop_sequence", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "a645548f-0b3a-47ce-a675-f51e6e9037de", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": "Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "a645548f-0b3a-47ce-a675-f51e6e9037de", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": " Here are the steps to convert 212 degrees Fahrenheit to Celsius:\n\n1) The formula to convert between Fahrenheit and Celsius is:\n C = (F - 32) * 5/9\n\n2) Plug in 212 degrees Fahrenheit for F:\n C = (212 - 32) * 5/9\n C = 180 * 5/9\n C = 100\n\n3) Therefore, 212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "cohere.command-text-v14": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "4f8ab6c5-42d1-4e35-9573-30f9f41f821e", + "response_id": None, # UUID that varies with each run + "duration": None, # Response time varies each test run + "request.model": "cohere.command-text-v14", + "response.model": "cohere.command-text-v14", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "COMPLETE", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "4f8ab6c5-42d1-4e35-9573-30f9f41f821e", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "4f8ab6c5-42d1-4e35-9573-30f9f41f821e", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": " To convert Fahrenheit to Celsius, you can use the formula:\n\nCelsius = (Fahrenheit - 32) * 5/9\n\nIn this case, if you have 212 degrees Fahrenheit, you can use this formula to calculate the equivalent temperature in Celsius:\n\nCelsius = (212 - 32) * 5/9 = 100 * 5/9 = 50\n\nTherefore, 212 degrees Fahrenheit is equivalent to 50 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], + "meta.llama2-13b-chat-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "6dd99878-0919-4f92-850c-48f50f923b76", + "duration": None, # Response time varies each test run + "request.model": "meta.llama2-13b-chat-v1", + "response.model": "meta.llama2-13b-chat-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "length", + 
"vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "6dd99878-0919-4f92-850c-48f50f923b76", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "request_id": "6dd99878-0919-4f92-850c-48f50f923b76", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "content": " What is the conversion formula?\n\n212 degrees Fahrenheit is equal to 100 degrees Celsius.\n\nThe conversion formula is:\n\n°C = (°F - 32) × 5/9\n\nSo, to convert 212 degrees Fahrenheit to Celsius, we can use the formula like this:\n\n°C = (212 - 32) × 5/9\n", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], +} + +chat_completion_invalid_model_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "response.model": "does-not-exist", + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), +] + +chat_completion_invalid_access_key_error_events = { + "amazon.titan-text-express-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "15b39c8b-8e85-42c9-9623-06720301bda3", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "15b39c8b-8e85-42c9-9623-06720301bda3", + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "ai21.j2-mid-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "9021791d-3797-493d-9277-e33aa6f6d544", + "duration": None, # Response time varies each test run + "request.model": "ai21.j2-mid-v1", + "response.model": "ai21.j2-mid-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + 
"response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "9021791d-3797-493d-9277-e33aa6f6d544", + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "ai21.j2-mid-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "anthropic.claude-instant-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "37396f55-b721-4bae-9461-4c369f5a080d", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-instant-v1", + "response.model": "anthropic.claude-instant-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "37396f55-b721-4bae-9461-4c369f5a080d", + "span_id": None, + "trace_id": "trace-id", + "content": "Human: Invalid Token Assistant:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "cohere.command-text-v14": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "duration": None, # Response time varies each test run + "request.model": "cohere.command-text-v14", + "response.model": "cohere.command-text-v14", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "meta.llama2-13b-chat-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "duration": None, # Response time varies each test run + "request.model": "meta.llama2-13b-chat-v1", + "response.model": "meta.llama2-13b-chat-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "span_id": None, + "trace_id": "trace-id", + "content": 
"Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "meta.llama2-13b-chat-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], +} + + +chat_completion_expected_malformed_request_body_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "e72d1b46-9f16-4bf0-8eee-f7778f32e5a5", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), +] + +chat_completion_expected_malformed_response_body_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", + "span_id": None, + "trace_id": "trace-id", + "content": "Malformed Body", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + +chat_completion_expected_malformed_response_streaming_body_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + "span_id": None, + "trace_id": "trace-id", + "content": "Malformed Streaming Body", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + +chat_completion_expected_malformed_response_streaming_chunk_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": 
"bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", + "span_id": None, + "trace_id": "trace-id", + "content": "Malformed Streaming Chunk", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + + +chat_completion_expected_streaming_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "content": "Streaming Exception", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] diff --git a/tests/external_botocore/_test_bedrock_embeddings.py b/tests/external_botocore/_test_bedrock_embeddings.py new file mode 100644 index 000000000..66f609f7b --- /dev/null +++ b/tests/external_botocore/_test_bedrock_embeddings.py @@ -0,0 +1,200 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +embedding_payload_templates = { + "amazon.titan-embed-text-v1": '{ "inputText": "%s" }', + "amazon.titan-embed-g1-text-02": '{ "inputText": "%s" }', + "cohere.embed-english-v3": '{"texts": ["%s"], "input_type": "search_document"}', +} + +embedding_expected_events = { + "amazon.titan-embed-text-v1": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "This is an embedding test.", + "duration": None, # Response time varies each test run + "response.model": "amazon.titan-embed-text-v1", + "request.model": "amazon.titan-embed-text-v1", + "request_id": "11233989-07e8-4ecb-9ba6-79601ba6d8cc", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "amazon.titan-embed-g1-text-02": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "This is an embedding test.", + "duration": None, # Response time varies each test run + "response.model": "amazon.titan-embed-g1-text-02", + "request.model": "amazon.titan-embed-g1-text-02", + "request_id": "b10ac895-eae3-4f07-b926-10b2866c55ed", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "cohere.embed-english-v3": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "['This is an embedding test.']", + "duration": None, # Response time varies each test run + "response.model": "cohere.embed-english-v3", + "request.model": "cohere.embed-english-v3", + "request_id": "11233989-07e8-4ecb-9ba6-79601ba6d8cc", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], +} + +embedding_invalid_access_key_error_events = { + "amazon.titan-embed-text-v1": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "Invalid Token", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-embed-text-v1", + "response.model": "amazon.titan-embed-text-v1", + "request_id": "aece6ad7-e2ff-443b-a953-ba7d385fd0cc", + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), + ], + "amazon.titan-embed-g1-text-02": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "Invalid Token", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-embed-g1-text-02", + "response.model": "amazon.titan-embed-g1-text-02", + "request_id": "73328313-506e-4da8-af0f-51017fa6ca3f", + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), + ], + "cohere.embed-english-v3": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "['Invalid Token']", + "duration": None, # Response time varies each test run + "request.model": "cohere.embed-english-v3", + "response.model": "cohere.embed-english-v3", + "request_id": "73328313-506e-4da8-af0f-51017fa6ca3f", + "vendor": "bedrock", + 
"ingest_source": "Python", + "error": True, + }, + ), + ], +} + +embedding_expected_client_errors = { + "amazon.titan-embed-text-v1": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + "amazon.titan-embed-g1-text-02": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + "cohere.embed-english-v3": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, +} + +embedding_expected_malformed_request_body_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-embed-g1-text-02", + "response.model": "amazon.titan-embed-g1-text-02", + "request_id": "b3646569-18c5-4173-a9fa-bbe9c648f636", + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), +] + +embedding_expected_malformed_response_body_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "input": "Malformed Body", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-embed-g1-text-02", + "response.model": "amazon.titan-embed-g1-text-02", + "request_id": "b10ac895-eae3-4f07-b926-10b2866c55ed", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index fb703c85e..65acfd1e5 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -12,10 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from testing_support.fixtures import ( # noqa: F401; pylint: disable=W0611 +import io +import json +import os +import re + +import pytest +from _mock_external_bedrock_server import ( + MockExternalBedrockServer, + extract_shortened_prompt, +) +from botocore.response import StreamingBody +from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 collector_agent_registration_fixture, collector_available_fixture, + override_application_settings, +) + +from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.common.package_version_utils import ( + get_package_version, + get_package_version_tuple, ) +from newrelic.common.signature import bind_args + +BOTOCORE_VERSION = get_package_version("botocore") _default_settings = { "transaction_tracer.explain_threshold": 0.0, @@ -23,8 +44,137 @@ "transaction_tracer.stack_trace_threshold": 0.0, "debug.log_data_collector_payloads": True, "debug.record_transaction_failure": True, + "custom_insights_events.max_attribute_value": 4096, + "ai_monitoring.enabled": True, } - collector_agent_registration = collector_agent_registration_fixture( - app_name="Python Agent Test (external_botocore)", default_settings=_default_settings + app_name="Python Agent Test (external_botocore)", + default_settings=_default_settings, + linked_applications=["Python Agent Test (external_botocore)"], ) + + +# Bedrock Fixtures +BEDROCK_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "bedrock_audit.log") +BEDROCK_AUDIT_LOG_CONTENTS = {} + + +@pytest.fixture(scope="session") +def bedrock_server(): + """ + This fixture will either create a mocked backend for testing purposes, or will + set up an audit log file to log responses of the real Bedrock backend to a file. + The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 as + an environment variable to run using the real Bedrock backend. (Default: mocking) + """ + import boto3 + + from newrelic.core.config import _environ_as_bool + + if get_package_version_tuple("botocore") < (1, 31, 57): + pytest.skip(reason="Bedrock Runtime not available.") + + if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): + # Use mocked Bedrock backend and prerecorded responses + with MockExternalBedrockServer() as server: + client = boto3.client( # nosec + "bedrock-runtime", + "us-east-1", + endpoint_url="http://localhost:%d" % server.port, + aws_access_key_id="NOT-A-REAL-SECRET", + aws_secret_access_key="NOT-A-REAL-SECRET", + ) + + yield client + else: + # Use real Bedrock backend and record responses + assert ( + os.environ["AWS_ACCESS_KEY_ID"] and os.environ["AWS_SECRET_ACCESS_KEY"] + ), "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required." 
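+        # Illustrative invocation for recording mode (not part of this patch):
+        #   NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 pytest tests/external_botocore
+        # Real AWS credentials must be exported first; the wrappers applied below
+        # capture each request/response pair into BEDROCK_AUDIT_LOG_FILE so the mock
+        # server can replay them on later runs.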
+ + # Construct real client + client = boto3.client( + "bedrock-runtime", + "us-east-1", + ) + + # Apply function wrappers to record data + wrap_function_wrapper( + "botocore.endpoint", "Endpoint._do_get_response", wrap_botocore_endpoint_Endpoint__do_get_response + ) + wrap_function_wrapper( + "botocore.eventstream", + "EventStreamBuffer.add_data", + wrap_botocore_eventstream_add_data, + ) + yield client # Run tests + + # Write responses to audit log + bedrock_audit_log_contents = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: (i[1][1], i[0]))) + with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: + json.dump(bedrock_audit_log_contents, fp=audit_log_fp, indent=4) + + +# Intercept outgoing requests and log to file for mocking +RECORDED_HEADERS = set(["x-amzn-requestid", "x-amzn-errortype", "content-type"]) + + +def wrap_botocore_endpoint_Endpoint__do_get_response(wrapped, instance, args, kwargs): + request = bind__do_get_response(*args, **kwargs) + if not request: + return wrapped(*args, **kwargs) + + match = re.search(r"/model/([0-9a-zA-Z.-]+)/", request.url) + model = match.group(1) + + # Send request + result = wrapped(*args, **kwargs) + + # Unpack response + success, exception = result + response = (success or exception)[0] + + if isinstance(request.body, io.BytesIO): + request.body.seek(0) + body = request.body.read() + else: + body = request.body + + try: + content = json.loads(body) + except Exception: + content = body.decode("utf-8") + + prompt = extract_shortened_prompt(content, model) + headers = dict(response.headers.items()) + headers = dict( + filter( + lambda k: k[0].lower() in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), + headers.items(), + ) + ) + status_code = response.status_code + + # Log response + if response.raw.chunked: + # Log response + BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, status_code, [] # Append response data to audit log + else: + # Clean up data + response_content = response.content + data = json.loads(response_content.decode("utf-8")) + result[0][1]["body"] = StreamingBody(io.BytesIO(response_content), len(response_content)) + BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, status_code, data # Append response data to audit log + return result + + +def bind__do_get_response(request, operation_model, context): + return request + + +def wrap_botocore_eventstream_add_data(wrapped, instance, args, kwargs): + bound_args = bind_args(wrapped, args, kwargs) + data = bound_args["data"].hex() # convert bytes to hex for storage + prompt = [k for k in BEDROCK_AUDIT_LOG_CONTENTS.keys()][-1] + BEDROCK_AUDIT_LOG_CONTENTS[prompt][2].append(data) + return wrapped(*args, **kwargs) diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py new file mode 100644 index 000000000..fc25e15ad --- /dev/null +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -0,0 +1,957 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
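The test module below repeats one structural pattern throughout: validator decorators are stacked on an inner _test function that runs under background_task, so every assertion applies to exactly one recorded transaction. A stripped-down sketch of that shape (fixture wiring and the prompt are elided; the names match the tests that follow):

    from newrelic.api.background_task import background_task
    from testing_support.fixtures import reset_core_stats_engine, validate_custom_event_count

    @reset_core_stats_engine()  # start each test from a clean stats engine
    def test_sketch(exercise_model, set_trace_info):
        # One summary event, one user message, and one response message from the assistant
        @validate_custom_event_count(count=3)
        @background_task(name="test_sketch")
        def _test():
            set_trace_info()
            exercise_model(prompt="...", temperature=0.7, max_tokens=100)

        _test()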
+import json +import os +from io import BytesIO + +import boto3 +import botocore.errorfactory +import botocore.eventstream +import botocore.exceptions +import pytest +from _test_bedrock_chat_completion import ( + chat_completion_expected_events, + chat_completion_expected_malformed_request_body_events, + chat_completion_expected_malformed_response_body_events, + chat_completion_expected_malformed_response_streaming_body_events, + chat_completion_expected_malformed_response_streaming_chunk_events, + chat_completion_expected_streaming_error_events, + chat_completion_invalid_access_key_error_events, + chat_completion_invalid_model_error_events, + chat_completion_payload_templates, + chat_completion_streaming_expected_events, +) +from conftest import BOTOCORE_VERSION # pylint: disable=E0611 +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + disabled_ai_monitoring_streaming_settings, + events_sans_content, + events_sans_llm_metadata, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name +from newrelic.hooks.external_botocore import MODEL_EXTRACTORS + + +@pytest.fixture(scope="session", params=[False, True], ids=["ResponseStandard", "ResponseStreaming"]) +def response_streaming(request): + return request.param + + +@pytest.fixture(scope="session", params=[False, True], ids=["RequestStandard", "RequestStreaming"]) +def request_streaming(request): + return request.param + + +@pytest.fixture( + scope="module", + params=[ + "amazon.titan-text-express-v1", + "ai21.j2-mid-v1", + "anthropic.claude-instant-v1", + "cohere.command-text-v14", + "meta.llama2-13b-chat-v1", + ], +) +def model_id(request, response_streaming): + model = request.param + if response_streaming and model == "ai21.j2-mid-v1": + pytest.skip(reason="Streaming not supported.") + + return model + + +@pytest.fixture(scope="module") +def exercise_model(bedrock_server, model_id, request_streaming, response_streaming): + payload_template = chat_completion_payload_templates[model_id] + + def _exercise_model(prompt, temperature=0.7, max_tokens=100): + body = (payload_template % (prompt, temperature, max_tokens)).encode("utf-8") + if request_streaming: + body = BytesIO(body) + + response = bedrock_server.invoke_model( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + response_body = json.loads(response.get("body").read()) + assert response_body + + return response_body + + def _exercise_streaming_model(prompt, temperature=0.7, max_tokens=100): + body = (payload_template % (prompt, temperature, max_tokens)).encode("utf-8") + if request_streaming: + body = BytesIO(body) + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + body = 
response.get("body") + for resp in body: + assert resp + + if response_streaming: + return _exercise_streaming_model + else: + return _exercise_model + + +@pytest.fixture(scope="module") +def expected_events(model_id, response_streaming): + if response_streaming: + return chat_completion_streaming_expected_events[model_id] + else: + return chat_completion_expected_events[model_id] + + +@pytest.fixture(scope="module") +def expected_metrics(response_streaming): + if response_streaming: + return [("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)] + else: + return [("Llm/completion/Bedrock/invoke_model", 1)] + + +@pytest.fixture(scope="module") +def expected_invalid_access_key_error_events(model_id): + return chat_completion_invalid_access_key_error_events[model_id] + + +_test_bedrock_chat_completion_prompt = "What is 212 degrees Fahrenheit converted to Celsius?" + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_in_txn_with_llm_metadata( + set_trace_info, exercise_model, expected_events, expected_metrics +): + @validate_custom_events(expected_events) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=3) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_with_llm_metadata", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_in_txn_with_llm_metadata") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model, expected_events, expected_metrics): + @validate_custom_events(events_sans_content(expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=3) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_no_content", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_no_content") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model, expected_events, expected_metrics): + @validate_custom_events(add_token_count_to_events(expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=3) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_with_token_count", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, 
+ custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_with_token_count") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model, expected_events, expected_metrics): + @validate_custom_events(events_sans_llm_metadata(expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=3) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_no_llm_metadata", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_in_txn_no_llm_metadata") + def _test(): + set_trace_info() + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_bedrock_chat_completion_outside_txn(exercise_model): + add_custom_attribute("llm.conversation_id", "my-awesome-id") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task(name="test_bedrock_chat_completion_disabled_ai_monitoring_setting") +def test_bedrock_chat_completion_disabled_ai_monitoring_settings(set_trace_info, exercise_model): + set_trace_info() + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_streaming_settings +def test_bedrock_chat_completion_streaming_disabled( + bedrock_server, +): + """Streaming is disabled, but the rest of the AI settings are enabled. 
Custom events should not be collected.""" + + @validate_custom_event_count(count=0) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % (_test_bedrock_chat_completion_prompt, 0.7, 100)).encode( + "utf-8" + ) + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + list(response["body"])  # Iterate + + _test() + + +_client_error = botocore.exceptions.ClientError +_client_error_name = callable_name(_client_error) + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_invalid_model( + bedrock_server, set_trace_info, response_streaming, expected_metrics +): + @validate_custom_events(chat_completion_invalid_model_error_events) + @validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_error_invalid_model", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_error_invalid_model") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + with pytest.raises(_client_error): + if response_streaming: + stream = bedrock_server.invoke_model_with_response_stream( + body=b"{}", + modelId="does-not-exist", + accept="application/json", + contentType="application/json", + ) + for _ in stream: + pass + else: + bedrock_server.invoke_model( + body=b"{}", + modelId="does-not-exist", + accept="application/json", + contentType="application/json", + ) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_incorrect_access_key( + monkeypatch, + bedrock_server, + exercise_model, + set_trace_info, + expected_invalid_access_key_error_events, + expected_metrics, +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in custom + events. The error response can also be parsed, and will be included as attributes on the recorded exception.
+ """ + + @validate_custom_events(expected_invalid_access_key_error_events) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +def test_bedrock_chat_completion_error_incorrect_access_key_no_content( + monkeypatch, + bedrock_server, + exercise_model, + set_trace_info, + expected_invalid_access_key_error_events, + expected_metrics, +): + """ + Duplicate of test_bedrock_chat_completion_error_incorrect_access_key, but with content recording disabled. + + See the original test for a description of the error case. + """ + + @validate_custom_events(events_sans_content(expected_invalid_access_key_error_events)) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_error_incorrect_access_key_with_token( + monkeypatch, + bedrock_server, + exercise_model, + set_trace_info, + expected_invalid_access_key_error_events, + expected_metrics, +): + @validate_custom_events(add_token_count_to_events(expected_invalid_access_key_error_events)) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + 
("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): # not sure where this exception actually comes from + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_malformed_request_body( + bedrock_server, + set_trace_info, + response_streaming, + expected_metrics, +): + """ + A request was made to the server, but the request body contains invalid JSON. The library will accept the invalid + payload, and still send a request. Our instrumentation will be unable to read it. As a result, no request + information will be recorded in custom events. This includes the initial prompt message event, which cannot be read + so it cannot be captured. The server will then respond with a ValidationException response immediately due to the + bad request. The response can still be parsed, so error information from the response will be recorded as normal. + """ + + @validate_custom_events(chat_completion_expected_malformed_request_body_events) + @validate_custom_event_count(count=1) + @validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 400, + "error.message": "Malformed input request, please reformat your input and try again.", + "error.code": "ValidationException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + model = "amazon.titan-text-express-v1" + body = "{ Malformed Request Body".encode("utf-8") + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + with pytest.raises(_client_error): + if response_streaming: + bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + else: + bedrock_server.invoke_model( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_malformed_response_body( + bedrock_server, + set_trace_info, +): + """ + After a non-streaming request was made to the server, the server responded with a response body that contains + invalid JSON. Since the JSON body is not parsed by botocore and just returned to the user as bytes, no parsing + exceptions will be raised. Instrumentation will attempt to parse the invalid body, and should not raise an + exception when it fails to do so. As a result, recorded events will not contain the streamed response data but will contain the request data. 
+ """ + + @validate_custom_events(chat_completion_expected_malformed_response_body_events) + @validate_custom_event_count(count=2) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % ("Malformed Body", 0.7, 100)).encode("utf-8") + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + assert response + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_malformed_response_streaming_body( + bedrock_server, + set_trace_info, +): + """ + A chunk in the stream returned by the server is valid, but contains a body with JSON that cannot be parsed. + Since the JSON body is not parsed by botocore and just returned to the user as bytes, no parsing exceptions will + be raised. Instrumentation will attempt to parse the invalid body, and should not raise an exception when it fails + to do so. The result should be all streamed response data missing from the recorded events, but request and summary + events are recorded as normal. + """ + + @validate_custom_events(chat_completion_expected_malformed_response_streaming_body_events) + @validate_custom_event_count(count=2) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % ("Malformed Streaming Body", 0.7, 100)).encode("utf-8") + + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + + chunks = list(response["body"]) + assert chunks, "No response chunks returned" + for chunk in chunks: + with pytest.raises(json.decoder.JSONDecodeError): + json.loads(chunk["chunk"]["bytes"]) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_malformed_response_streaming_chunk( + bedrock_server, + set_trace_info, +): + """ + A chunk in the stream returned by the server is missing the prelude which causes an InvalidHeadersLength exception + to be raised during parsing of the chunk. Since the streamed chunk is not able to be parsed, the response + attribute on the raised exception is not present. This means all streamed response data will be missing from the + recorded events. 
+ """ + + @validate_custom_events(chat_completion_expected_malformed_response_streaming_chunk_events) + @validate_custom_event_count(count=2) + @validate_error_trace_attributes( + "botocore.eventstream:InvalidHeadersLength", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "llm.conversation_id": "my-awesome-id", + }, + }, + forgone_params={ + "agent": (), + "intrinsic": (), + "user": ("http.statusCode", "error.message", "error.code"), + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % ("Malformed Streaming Chunk", 0.7, 100)).encode("utf-8") + with pytest.raises(botocore.eventstream.InvalidHeadersLength): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + response = "".join(chunk for chunk in response["body"]) + assert response + + _test() + + +_event_stream_error = botocore.exceptions.EventStreamError +_event_stream_error_name = "botocore.exceptions:EventStreamError" + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_streaming_exception( + bedrock_server, + set_trace_info, +): + """ + During a streaming call, the streamed chunk's headers indicate an error. These headers are not HTTP headers, but + headers embedded in the binary format of the response from the server. The streamed chunk's response body is not + required to contain any information regarding the exception, the headers are sufficient to cause botocore's + parser to raise an actual exception based on the error code. The response attribute on the raised exception will + contain the error information. This means error data will be reported for the response, but all response message + data will be missing from the recorded events since the server returned an error instead of message data inside + the streamed response. 
+ """ + + @validate_custom_events(chat_completion_expected_streaming_error_events) + @validate_custom_event_count(count=2) + @validate_error_trace_attributes( + _event_stream_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.message": "Malformed input request, please reformat your input and try again.", + "error.code": "ValidationException", + }, + }, + forgone_params={ + "agent": (), + "intrinsic": (), + "user": ("http.statusCode"), + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + with pytest.raises(_event_stream_error): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % ("Streaming Exception", 0.7, 100)).encode("utf-8") + + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + list(response["body"]) # Iterate + + _test() + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +def test_bedrock_chat_completion_error_streaming_exception_no_content( + bedrock_server, + set_trace_info, +): + """ + Duplicate of test_bedrock_chat_completion_error_streaming_exception, but with content recording disabled. + + See the original test for a description of the error case. 
+ """ + + @validate_custom_events(events_sans_content(chat_completion_expected_streaming_error_events)) + @validate_custom_event_count(count=2) + @validate_error_trace_attributes( + _event_stream_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.message": "Malformed input request, please reformat your input and try again.", + "error.code": "ValidationException", + }, + }, + forgone_params={ + "agent": (), + "intrinsic": (), + "user": ("http.statusCode"), + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + with pytest.raises(_event_stream_error): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % ("Streaming Exception", 0.7, 100)).encode("utf-8") + + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + list(response["body"]) # Iterate + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_error_streaming_exception_with_token_count( + bedrock_server, + set_trace_info, +): + """ + Duplicate of test_bedrock_chat_completion_error_streaming_exception, but with token callback being set. + + See the original test for a description of the error case. 
+ """ + + @validate_custom_events(add_token_count_to_events(chat_completion_expected_streaming_error_events)) + @validate_custom_event_count(count=2) + @validate_error_trace_attributes( + _event_stream_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.message": "Malformed input request, please reformat your input and try again.", + "error.code": "ValidationException", + }, + }, + forgone_params={ + "agent": (), + "intrinsic": (), + "user": ("http.statusCode"), + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + with pytest.raises(_event_stream_error): + model = "amazon.titan-text-express-v1" + body = (chat_completion_payload_templates[model] % ("Streaming Exception", 0.7, 100)).encode("utf-8") + + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model_with_response_stream( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + list(response["body"]) # Iterate + + _test() + + +def test_bedrock_chat_completion_functions_marked_as_wrapped_for_sdk_compatibility(bedrock_server): + assert bedrock_server._nr_wrapped + + +def test_chat_models_instrumented(): + SUPPORTED_MODELS = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" not in model] + + _id = os.environ.get("AWS_ACCESS_KEY_ID") + key = os.environ.get("AWS_SECRET_ACCESS_KEY") + if not _id or not key: + pytest.skip(reason="Credentials not available.") + + client = boto3.client( + "bedrock", + "us-east-1", + ) + response = client.list_foundation_models(byOutputModality="TEXT") + models = [model["modelId"] for model in response["modelSummaries"]] + not_supported = [] + for model in models: + is_supported = any([model.startswith(supported_model) for supported_model in SUPPORTED_MODELS]) + if not is_supported: + not_supported.append(model) + + assert not not_supported, "The following unsupported models were found: %s" % not_supported diff --git a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py new file mode 100644 index 000000000..e144868e3 --- /dev/null +++ b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py @@ -0,0 +1,132 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from _test_bedrock_chat_completion import ( + chat_completion_langchain_expected_events, + chat_completion_langchain_expected_streaming_events, +) +from conftest import BOTOCORE_VERSION  # pylint: disable=E0611 +from testing_support.fixtures import ( + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import set_trace_info  # noqa: F401 +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute + +UNSUPPORTED_LANGCHAIN_MODELS = [ + "ai21.j2-mid-v1", + "cohere.command-text-v14", +] + + +@pytest.fixture( + scope="module", + params=[ + "amazon.titan-text-express-v1", + "ai21.j2-mid-v1", + "anthropic.claude-instant-v1", + "cohere.command-text-v14", + "meta.llama2-13b-chat-v1", + ], +) +def model_id(request): + model = request.param + if model in UNSUPPORTED_LANGCHAIN_MODELS: + pytest.skip(reason="Not supported by Langchain.") + return model + + +@pytest.fixture(scope="session", params=[False, True], ids=["ResponseStandard", "ResponseStreaming"]) +def response_streaming(request): + return request.param + + +@pytest.fixture(scope="module") +def exercise_model(bedrock_server, model_id, response_streaming): + try: + # Langchain is only installed in some of the botocore test environments. + from langchain.chains import ConversationChain + from langchain_community.chat_models import BedrockChat + except ImportError: + pytest.skip(reason="Langchain not installed.") + + def _exercise_model(prompt): + bedrock_llm = BedrockChat( + model_id=model_id, + client=bedrock_server, + streaming=response_streaming, + ) + conversation = ConversationChain(llm=bedrock_llm) + result = conversation.predict(input=prompt) + if response_streaming: + for r in result: + assert r + else: + assert result + + return _exercise_model + + +@pytest.fixture(scope="module") +def expected_events(model_id, response_streaming): + if response_streaming: + return chat_completion_langchain_expected_streaming_events[model_id] + return chat_completion_langchain_expected_events[model_id] + + +@pytest.fixture(scope="module") +def expected_metrics(response_streaming): + if response_streaming: + return [("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)] + return [("Llm/completion/Bedrock/invoke_model", 1)] + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_in_txn_with_llm_metadata( + set_trace_info, + exercise_model, + expected_events, + expected_metrics, + response_streaming, +): + @validate_custom_events(expected_events) + # Six custom events are expected: both the LangChain run and the underlying Bedrock call record summary and message events + @validate_custom_event_count(count=6) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_with_llm_metadata", + scoped_metrics=expected_metrics, + rollup_metrics=expected_metrics, + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_in_txn_with_llm_metadata") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + exercise_model(prompt="Hi there!") + + _test() diff --git 
a/tests/external_botocore/test_bedrock_embeddings.py b/tests/external_botocore/test_bedrock_embeddings.py new file mode 100644 index 000000000..946858e8d --- /dev/null +++ b/tests/external_botocore/test_bedrock_embeddings.py @@ -0,0 +1,485 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +from io import BytesIO + +import boto3 +import botocore.exceptions +import pytest +from _test_bedrock_embeddings import ( + embedding_expected_events, + embedding_expected_malformed_request_body_events, + embedding_expected_malformed_response_body_events, + embedding_invalid_access_key_error_events, + embedding_payload_templates, +) +from conftest import BOTOCORE_VERSION # pylint: disable=E0611 +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_sans_content, + events_sans_llm_metadata, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name +from newrelic.hooks.external_botocore import MODEL_EXTRACTORS + + +@pytest.fixture(scope="session", params=[False, True], ids=["RequestStandard", "RequestStreaming"]) +def request_streaming(request): + return request.param + + +@pytest.fixture( + scope="module", + params=[ + "amazon.titan-embed-text-v1", + "amazon.titan-embed-g1-text-02", + "cohere.embed-english-v3", + ], +) +def model_id(request): + return request.param + + +@pytest.fixture(scope="module") +def exercise_model(bedrock_server, model_id, request_streaming): + payload_template = embedding_payload_templates[model_id] + + def _exercise_model(prompt): + body = (payload_template % prompt).encode("utf-8") + if request_streaming: + body = BytesIO(body) + + response = bedrock_server.invoke_model( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + response_body = json.loads(response.get("body").read()) + assert response_body + + return response_body + + return _exercise_model + + +@pytest.fixture(scope="module") +def expected_events(model_id): + return embedding_expected_events[model_id] + + +@pytest.fixture(scope="module") +def expected_invalid_access_key_error_events(model_id): + return embedding_invalid_access_key_error_events[model_id] + + +_test_bedrock_embedding_prompt = "This is an embedding test." 
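+
+# Illustration only: for the amazon.titan-embed models, the payload template in
+# _test_bedrock_embeddings.py is assumed to be of the form '{"inputText": "%s"}',
+# so for the prompt above exercise_model would build a request body like
+# b'{"inputText": "This is an embedding test."}' before invoking the model.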
+ + +@reset_core_stats_engine() +def test_bedrock_embedding_with_llm_metadata(set_trace_info, exercise_model, expected_events): + @validate_custom_events(expected_events) + @validate_custom_event_count(count=1) + @validate_transaction_metrics( + name="test_bedrock_embedding", + scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_embedding") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + exercise_model(prompt=_test_bedrock_embedding_prompt) + + _test() + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +def test_bedrock_embedding_no_content(set_trace_info, exercise_model, model_id): + @validate_custom_events(events_sans_content(embedding_expected_events[model_id])) + @validate_custom_event_count(count=1) + @validate_transaction_metrics( + name="test_bedrock_embedding", + scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_embedding") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + exercise_model(prompt=_test_bedrock_embedding_prompt) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_embedding_no_llm_metadata(set_trace_info, exercise_model, expected_events): + @validate_custom_events(events_sans_llm_metadata(expected_events)) + @validate_custom_event_count(count=1) + @validate_transaction_metrics( + name="test_bedrock_embedding_no_llm_metadata", + scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_embedding_no_llm_metadata") + def _test(): + set_trace_info() + exercise_model(prompt=_test_bedrock_embedding_prompt) + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_embedding_with_token_count(set_trace_info, exercise_model, expected_events): + @validate_custom_events(add_token_count_to_events(expected_events)) + @validate_custom_event_count(count=1) + @validate_transaction_metrics( + name="test_bedrock_embedding", + scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_embedding") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + exercise_model(prompt="This is an embedding test.") + + _test() + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def 
test_bedrock_embedding_outside_txn(exercise_model):
+    add_custom_attribute("llm.conversation_id", "my-awesome-id")
+    exercise_model(prompt=_test_bedrock_embedding_prompt)
+
+
+@disabled_ai_monitoring_settings
+@reset_core_stats_engine()
+@validate_custom_event_count(count=0)
+@background_task(name="test_bedrock_embedding_disabled_ai_monitoring_setting")
+def test_bedrock_embedding_disabled_ai_monitoring_settings(set_trace_info, exercise_model):
+    set_trace_info()
+    exercise_model(prompt=_test_bedrock_embedding_prompt)
+
+
+_client_error = botocore.exceptions.ClientError
+_client_error_name = callable_name(_client_error)
+
+
+@reset_core_stats_engine()
+def test_bedrock_embedding_error_incorrect_access_key(
+    monkeypatch,
+    bedrock_server,
+    exercise_model,
+    set_trace_info,
+    expected_invalid_access_key_error_events,
+):
+    """
+    A request is made to the server with invalid credentials. botocore will reach out to the server and receive an
+    UnrecognizedClientException as a response. Information from the request will be parsed and reported in custom
+    events. The error response can also be parsed, and will be included as attributes on the recorded exception.
+    """
+
+    @validate_custom_events(expected_invalid_access_key_error_events)
+    @validate_error_trace_attributes(
+        _client_error_name,
+        exact_attrs={
+            "agent": {},
+            "intrinsic": {},
+            "user": {
+                "http.statusCode": 403,
+                "error.message": "The security token included in the request is invalid.",
+                "error.code": "UnrecognizedClientException",
+            },
+        },
+    )
+    @validate_transaction_metrics(
+        name="test_bedrock_embedding",
+        scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1),
+        ],
+        background_task=True,
+    )
+    @background_task(name="test_bedrock_embedding")
+    def _test():
+        monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY")
+
+        with pytest.raises(_client_error):
+            set_trace_info()
+            add_custom_attribute("llm.conversation_id", "my-awesome-id")
+            add_custom_attribute("llm.foo", "bar")
+            add_custom_attribute("non_llm_attr", "python-agent")
+
+            exercise_model(prompt="Invalid Token")
+
+    _test()
+
+
+@reset_core_stats_engine()
+@disabled_ai_monitoring_record_content_settings
+def test_bedrock_embedding_error_incorrect_access_key_no_content(
+    monkeypatch,
+    bedrock_server,
+    exercise_model,
+    set_trace_info,
+    expected_invalid_access_key_error_events,
+):
+    @validate_custom_events(events_sans_content(expected_invalid_access_key_error_events))
+    @validate_error_trace_attributes(
+        _client_error_name,
+        exact_attrs={
+            "agent": {},
+            "intrinsic": {},
+            "user": {
+                "http.statusCode": 403,
+                "error.message": "The security token included in the request is invalid.",
+                "error.code": "UnrecognizedClientException",
+            },
+        },
+    )
+    @validate_transaction_metrics(
+        name="test_bedrock_embedding",
+        scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        background_task=True,
+    )
+    @background_task(name="test_bedrock_embedding")
+    def _test():
+        monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY")
+
+        with pytest.raises(_client_error):
+            set_trace_info()
+            add_custom_attribute("llm.conversation_id", "my-awesome-id")
+            add_custom_attribute("llm.foo", "bar")
+            add_custom_attribute("non_llm_attr", "python-agent")
+
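+            # With the credentials patched above, the backend responds 403 and
+            # botocore raises the ClientError asserted by pytest.raises.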
+            exercise_model(prompt="Invalid Token")
+
+    _test()
+
+
+@reset_core_stats_engine()
+@override_llm_token_callback_settings(llm_token_count_callback)
+def test_bedrock_embedding_error_incorrect_access_key_with_token_count(
+    monkeypatch,
+    bedrock_server,
+    exercise_model,
+    set_trace_info,
+    expected_invalid_access_key_error_events,
+):
+    @validate_custom_events(add_token_count_to_events(expected_invalid_access_key_error_events))
+    @validate_error_trace_attributes(
+        _client_error_name,
+        exact_attrs={
+            "agent": {},
+            "intrinsic": {},
+            "user": {
+                "http.statusCode": 403,
+                "error.message": "The security token included in the request is invalid.",
+                "error.code": "UnrecognizedClientException",
+            },
+        },
+    )
+    @validate_transaction_metrics(
+        name="test_bedrock_embedding",
+        scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        background_task=True,
+    )
+    @background_task(name="test_bedrock_embedding")
+    def _test():
+        monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY")
+
+        # botocore raises a ClientError for the 403 UnrecognizedClientException response.
+        with pytest.raises(_client_error):
+            set_trace_info()
+            add_custom_attribute("llm.conversation_id", "my-awesome-id")
+            add_custom_attribute("llm.foo", "bar")
+            add_custom_attribute("non_llm_attr", "python-agent")
+
+            exercise_model(prompt="Invalid Token")
+
+    _test()
+
+
+@reset_core_stats_engine()
+def test_bedrock_embedding_error_malformed_request_body(
+    bedrock_server,
+    set_trace_info,
+):
+    """
+    A request is made to the server, but the request body contains invalid JSON. The library accepts the invalid
+    payload and still sends the request. Our instrumentation is unable to read the body, so no request information
+    is recorded in custom events; this includes the initial prompt message event, which cannot be captured because
+    the body cannot be read. The server then responds immediately with a ValidationException due to the bad
+    request. That response can still be parsed, so error information from the response is recorded as normal.
+    """
+
+    @validate_custom_events(embedding_expected_malformed_request_body_events)
+    @validate_custom_event_count(count=1)
+    @validate_error_trace_attributes(
+        "botocore.errorfactory:ValidationException",
+        exact_attrs={
+            "agent": {},
+            "intrinsic": {},
+            "user": {
+                "http.statusCode": 400,
+                "error.message": "Malformed input request, please reformat your input and try again.",
+                "error.code": "ValidationException",
+            },
+        },
+    )
+    @validate_transaction_metrics(
+        name="test_bedrock_embedding",
+        scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1),
+        ],
+        background_task=True,
+    )
+    @background_task(name="test_bedrock_embedding")
+    def _test():
+        model = "amazon.titan-embed-g1-text-02"
+        body = "{ Malformed Request Body".encode("utf-8")
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        with pytest.raises(_client_error):
+            bedrock_server.invoke_model(
+                body=body,
+                modelId=model,
+                accept="application/json",
+                contentType="application/json",
+            )
+
+    _test()
+
+
+@reset_core_stats_engine()
+def test_bedrock_embedding_error_malformed_response_body(
+    bedrock_server,
+    set_trace_info,
+):
+    """
+    After a non-streaming request was made to the server, the server responded with a response body that contains
+    invalid JSON. Since the JSON body is not parsed by botocore and is just returned to the user as bytes, no
+    parsing exceptions will be raised. Instrumentation will attempt to parse the invalid body, and should not raise
+    an exception when it fails to do so. As a result, recorded events will not contain the response data but will
+    contain the request data.
+ """ + + @validate_custom_events(embedding_expected_malformed_response_body_events) + @validate_custom_event_count(count=1) + @validate_transaction_metrics( + name="test_bedrock_embedding", + scoped_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + rollup_metrics=[("Llm/embedding/Bedrock/invoke_model", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Bedrock/%s" % BOTOCORE_VERSION, 1), + ], + background_task=True, + ) + @background_task(name="test_bedrock_embedding") + def _test(): + model = "amazon.titan-embed-g1-text-02" + body = (embedding_payload_templates[model] % "Malformed Body").encode("utf-8") + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + response = bedrock_server.invoke_model( + body=body, + modelId=model, + accept="application/json", + contentType="application/json", + ) + assert response + + _test() + + +def test_embedding_models_instrumented(): + SUPPORTED_MODELS = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" in model] + + _id = os.environ.get("AWS_ACCESS_KEY_ID") + key = os.environ.get("AWS_SECRET_ACCESS_KEY") + if not _id or not key: + pytest.skip(reason="Credentials not available.") + + client = boto3.client( + "bedrock", + "us-east-1", + ) + response = client.list_foundation_models(byOutputModality="EMBEDDING") + models = [model["modelId"] for model in response["modelSummaries"]] + not_supported = [] + for model in models: + is_supported = any([model.startswith(supported_model) for supported_model in SUPPORTED_MODELS]) + if not is_supported: + not_supported.append(model) + + assert not not_supported, "The following unsupported models were found: %s" % not_supported diff --git a/tests/external_botocore/test_boto3_iam.py b/tests/external_botocore/test_boto3_iam.py index 72165dd74..7fc927d4f 100644 --- a/tests/external_botocore/test_boto3_iam.py +++ b/tests/external_botocore/test_boto3_iam.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import uuid import boto3 diff --git a/tests/external_botocore/test_boto3_s3.py b/tests/external_botocore/test_boto3_s3.py index 1d91c4636..893e0a6ce 100644 --- a/tests/external_botocore/test_boto3_s3.py +++ b/tests/external_botocore/test_boto3_s3.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import uuid import boto3 @@ -28,7 +27,6 @@ from newrelic.common.package_version_utils import get_package_version_tuple MOTO_VERSION = get_package_version_tuple("moto") - AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec AWS_REGION_NAME = "us-west-2" diff --git a/tests/external_botocore/test_boto3_sns.py b/tests/external_botocore/test_boto3_sns.py index a1ffc1331..4e9cdf4d6 100644 --- a/tests/external_botocore/test_boto3_sns.py +++ b/tests/external_botocore/test_boto3_sns.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys - import boto3 import pytest from moto import mock_aws @@ -30,7 +28,6 @@ from newrelic.common.package_version_utils import get_package_version_tuple MOTO_VERSION = get_package_version_tuple("moto") - AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) AWS_REGION_NAME = "us-east-1" diff --git a/tests/external_botocore/test_botocore_dynamodb.py b/tests/external_botocore/test_botocore_dynamodb.py index 539993206..db4aeabe1 100644 --- a/tests/external_botocore/test_botocore_dynamodb.py +++ b/tests/external_botocore/test_botocore_dynamodb.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import uuid import botocore.session @@ -30,7 +29,6 @@ from newrelic.common.package_version_utils import get_package_version_tuple MOTO_VERSION = get_package_version_tuple("moto") - AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) AWS_REGION = "us-east-1" diff --git a/tests/external_botocore/test_botocore_ec2.py b/tests/external_botocore/test_botocore_ec2.py index cfbf48e3b..6d965e27f 100644 --- a/tests/external_botocore/test_botocore_ec2.py +++ b/tests/external_botocore/test_botocore_ec2.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import uuid import botocore.session @@ -30,7 +29,6 @@ from newrelic.common.package_version_utils import get_package_version_tuple MOTO_VERSION = get_package_version_tuple("moto") - AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) AWS_REGION = "us-east-1" diff --git a/tests/external_botocore/test_botocore_s3.py b/tests/external_botocore/test_botocore_s3.py index 2f6ccb828..a414646f3 100644 --- a/tests/external_botocore/test_botocore_s3.py +++ b/tests/external_botocore/test_botocore_s3.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys import uuid import botocore diff --git a/tests/external_botocore/test_botocore_sqs.py b/tests/external_botocore/test_botocore_sqs.py index c9d8585fd..67744d133 100644 --- a/tests/external_botocore/test_botocore_sqs.py +++ b/tests/external_botocore/test_botocore_sqs.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys import uuid import botocore.session @@ -31,7 +30,6 @@ BOTOCORE_VERSION = get_package_version_tuple("botocore") url = "sqs.us-east-1.amazonaws.com" - if BOTOCORE_VERSION < (1, 29, 0): url = "queue.amazonaws.com" diff --git a/tests/logger_logging/conftest.py b/tests/logger_logging/conftest.py index fd75d4e19..63985e938 100644 --- a/tests/logger_logging/conftest.py +++ b/tests/logger_logging/conftest.py @@ -65,7 +65,7 @@ def logger(request): _logger.setLevel(logging.WARNING) # Save instrumentation so we can disable it - instrumented = logging.Logger.callHandlers + instrumented = logging.Logger.callHandlers forwarding_handler = None if request.param == "forwarding_handler": @@ -77,11 +77,11 @@ def logger(request): yield _logger del caplog.records[:] - + _logger.removeHandler(caplog) if forwarding_handler: _logger.removeHandler(forwarding_handler) - + # Reinstrument logging in case it was uninstrumented logging.Logger.callHandlers = instrumented @@ -96,5 +96,5 @@ def instrumented_logger(): yield _logger del caplog.records[:] - + _logger.removeHandler(caplog) diff --git a/tests/logger_logging/test_logging_handler.py b/tests/logger_logging/test_logging_handler.py index ef9194068..f261a2ae3 100644 --- a/tests/logger_logging/test_logging_handler.py +++ b/tests/logger_logging/test_logging_handler.py @@ -141,7 +141,7 @@ def test_handler_dict_message_with_formatter(formatting_logger): "trace.id": "abcdefgh12345678", } ], - forgone_attrs=["message.attr"] # Explicit formatters take precedence over dict message support + forgone_attrs=["message.attr"], # Explicit formatters take precedence over dict message support ) @validate_log_event_count(1) @validate_function_called("newrelic.api.log", "NewRelicLogForwardingHandler.emit") @@ -155,7 +155,7 @@ def test(): # Grab the event directly off the transaction to compare message manually captured_events = list(current_transaction()._log_events) assert len(captured_events) == 1 - + # Accept anything that looks like the correct types. assert captured_events[0].message.startswith("WARNING - {") diff --git a/tests/mlmodel_langchain/_mock_external_openai_server.py b/tests/mlmodel_langchain/_mock_external_openai_server.py new file mode 100644 index 000000000..6eefc9ef2 --- /dev/null +++ b/tests/mlmodel_langchain/_mock_external_openai_server.py @@ -0,0 +1,443 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import pytest +from testing_support.mock_external_http_server import MockExternalHTTPServer + +from newrelic.common.package_version_utils import get_package_version_tuple + +# This defines an external server test apps can make requests to instead of +# the real OpenAI backend. This provides 3 features: +# +# 1) This removes dependencies on external websites. 
+# 2) Provides a better mechanism for making an external call in a test app than +# simple calling another endpoint the test app makes available because this +# server will not be instrumented meaning we don't have to sort through +# transactions to separate the ones created in the test app and the ones +# created by an external call. +# 3) This app runs on a separate thread meaning it won't block the test app. +STREAMED_RESPONSES_V1 = { + "You are a world class algorithm for extracting information in structured formats.": [ + { + "content-type": "text/event-stream", + "openai-model": "gpt-3.5-turbo-0125", + "openai-organization": "foobar-jtbczk", + "openai-processing-ms": "511", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "196", + "x-ratelimit-remaining-tokens": "39924", + "x-ratelimit-reset-requests": "23m16.298s", + "x-ratelimit-reset-tokens": "114ms", + "x-request-id": "req_69c9ac5f95907fdb4af31572fd99537f", + }, + 200, + [ + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [ + {"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": None, "finish_reason": None} + ], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": "The"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": " extracted"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [ + {"index": 0, "delta": {"content": " information"}, "logprobs": None, "finish_reason": None} + ], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": " from"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": " the"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": " input"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": ' "'}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 
1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": "Hello"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": ","}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": " world"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": '"'}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": " is"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": ' "'}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": "H"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": "elloworld"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {"content": '"'}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8uUiO2kRX1yl9fyniZCjJ6q3GN8wf", + "object": "chat.completion.chunk", + "created": 1708475128, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_69829325d0", + "choices": [{"index": 0, "delta": {}, "logprobs": None, "finish_reason": "stop"}], + }, + ], + ], +} +RESPONSES_V1 = { + "You are a world class algorithm for extracting information in structured formats.": [ + { + "content-type": "application/json", + "openai-model": "gpt-3.5-turbo-1106", + "openai-organization": "foobar-jtbczk", + "openai-processing-ms": "749", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-limit-tokens_usage_based": "40000", + "x-ratelimit-remaining-requests": "197", + "x-ratelimit-remaining-tokens": "39929", + "x-ratelimit-remaining-tokens_usage_based": "39929", + "x-ratelimit-reset-requests": "16m17.764s", + "x-ratelimit-reset-tokens": "106ms", + "x-ratelimit-reset-tokens_usage_based": "106ms", + 
"x-request-id": "f47e6e80fb796a56c05ad89c5d98609c", + }, + 200, + { + "id": "chatcmpl-8ckHXhZGwmPuqIIaKLbacUEq4SPq1", + "object": "chat.completion", + "created": 1704245063, + "model": "gpt-3.5-turbo-1106", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "function_call": {"name": "output_formatter", "arguments": '{"name":"Sally","age":13}'}, + }, + "logprobs": None, + "finish_reason": "stop", + } + ], + "usage": {"prompt_tokens": 159, "completion_tokens": 10, "total_tokens": 169}, + "system_fingerprint": "fp_772e8125bb", + }, + ], + "You are a world class algorithm for extracting information in structured formats with openai failures.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "e58911d54d574647d36237e4e53c0f1a"}, + 401, + { + "error": { + "message": "Incorrect API key provided: No-exist. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": None, + "code": "invalid_api_key", + } + }, + ], + "You are a helpful assistant who generates comma separated lists.\n A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.\n ONLY return a comma separated list, and nothing more.": [ + { + "Content-Type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "foobar-jtbczk", + "openai-processing-ms": "488", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-limit-tokens_usage_based": "40000", + "x-ratelimit-remaining-requests": "199", + "x-ratelimit-remaining-tokens": "39921", + "x-ratelimit-remaining-tokens_usage_based": "39921", + "x-ratelimit-reset-requests": "7m12s", + "x-ratelimit-reset-tokens": "118ms", + "x-ratelimit-reset-tokens_usage_based": "118ms", + "x-request-id": "f3de99e17ccc360430cffa243b74dcbd", + }, + 200, + { + "id": "chatcmpl-8XEjOPNHth7yS2jt1You3fEwB6w9i", + "object": "chat.completion", + "created": 1702932142, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": {"role": "assistant", "content": "red, blue, green, yellow, orange"}, + "logprobs": None, + "finish_reason": "stop", + } + ], + "usage": {"prompt_tokens": 60, "completion_tokens": 9, "total_tokens": 69}, + "system_fingerprint": None, + }, + ], + "9906": [ + { + "content-type": "application/json", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "23", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "3000", + "x-ratelimit-limit-tokens": "1000000", + "x-ratelimit-remaining-requests": "2999", + "x-ratelimit-remaining-tokens": "999996", + "x-ratelimit-reset-requests": "20ms", + "x-ratelimit-reset-tokens": "0s", + "x-request-id": "058b2dd82590aa4145e97c2e59681f62", + }, + 200, + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": 
"0TB/Ov96cDsiAKC8oBytvE/gdrsckEQ6CG5svFFCLDz4Vr+7jCAqvXdNdzx16EY8T5m2vJtdLLxfhxM7gEDzO8tQkzzAITe8b08bPIYd5DzO07O8593cO8+EDrsRy4I7jI2/vAcnrDvjPMw7ElaIvB/qFD2P5w+9kJvlPMLKrLzMl1O8DAwCvAxTwjuP54+7OsMIuu26TbxXjLI8ByesvHCWWzydczc7dF3BO6CJwjkzeQK9vQssPI42NTqPVKW8REEKO7GVjzx42Hw8xXOiuzhh07wrYLE8JDwAvS0Jp7oezKS8zxr0PEs/5jwNBB28NFMtvMKGZzt1wvG8pFAoPInkSbyZjuE7AmirOx1BHzzDN0K8cSHhPNCl+Ty5k2u8yp84vDjOaLzyDLk8jlyKO1FrfLywd587qi0ZPN0QNryGYak8fFC9vLZ94LuRkIU8x7L9OwHdJTwDhpu7sKDvPLajtbx7Ms28eRzCOp2ZjDoRpa07ZNx5PGMoJLzrL8g7KkJBvJvwFjzEwke7RK4fvWlyKrxWAS281c4UvNX3ZLz0SBm8+k7au3YsDDzoaGI7+ZqEPPatSTuNPpq8vXjBPGHsQ7yLb8+8D48iO5OxcLsb32k80KX5O+ShfLtErp+8L5SsPP07FT3C8IE8eYnXvH/5MjwHupY6EK0SvWVkhLzW7AS826uFvPGfIz2dczc8z8tOPB+Aejufa9K8bsSVPHj+UTlFOaU8kZAFvA5L3bwv22w86YZSPG/ihTxOv4u82QKQPDu7ozwqhoY8hJJevIBA8ztSYBy8EsMdPBpUZDxs9co7TTQGvH0q6Do3hyg8fJ9iO2wboDwot7s7vryGvHrNnLrLUBO8SSnbu4cSBL2e4Ew8JTSbPOmG0jxdJV47arlqvHBti7zZmHW8q0sJPIZhKb3mcMc8glZ+vOqkwjuBoqi7lcSAPKb5nbw2/KK8GMnevE00BjylAQM8y3njPDW43brZ3Do6O06OPERBirtcmlg8D2lNvKUBAzzzcek84mKhPMhjWDy0GDC/PmSZu8VzIjxfYT480nTEu3j+UTyTG4s8y1CTPIPeiLu0PoU8YruOu2z1SjyiFEi7ZY1UvPZeJLzV92S8K83Guq47v7weObq8PUYpPMM3QrwUKM48nA4HvFVQUrw4OIM8jYVaOuisJ7l+H4i8TTQGOoVDuTxbLUO8/1GgPMZrPTx16Ea6MIxHPR2uNDzLKr67QgUqPCLayjuONrU8z/EjvEK2hLxGpjo7P8nJvHvFN7zLeeM7frXtPDN8/Tv4Vr+7rmGUu0amujxnyTS8ApF7PPZeJDyvFWo7AmirO6rAAz14/tE7syAVu20TOzsMD326gsCYOj+CiTqDB1k8rs6pvDM1vTwkFqu8+2xKPG9Pm7x+bi28XHGIurhyALzDEW2802xfvEJyvzzuHAM9JfDVPGClA7v8ZGW8fQGYPJgDXDxITzA7QA2PvA3A17wwspw8WPnHu5Xt0Lz7bMo6pL29uZFMwDutiuQ8I4slPN7BkLyS18W7q0sJPTGqtzvR6b67WKoiPPME1Dwx0Iy7EhLDO5QTJrzT/0m8nFVHO8ccmDwEzVu70uFZvGVkBD3xnyM9ZWf/vHOsZjuwCgq8VeM8veCT1jwUKM46hxIEvfX87ruFQ7k7dMrWPDN8fby9MYE8RcwPPKnp0zy7z0u8vFpRPB+Aeju9NHy8FQL5O+HXG7xljVS8TBaWPPOXvjrrwrI8UUIsvH5I2DsCaKu70TB/PKLFIrxowU889xJ6OZ2ZDLyZIcy7poyIPOrKl7zGkZI8c6zmvAzmrLwp/vs6TiwhOuchIrxJ2jU8vIAmvNqNFb1gEpk7J5lLuxtJBLxy0rs7FLu4vMJdF70xZvK89q3JuinVqzxLP2Y7frXtuqUBAzvVis+8tD6FvKGnMjykl2i7TiwhvZDBujx0Dhy87x9+vOAAbDoWs9O7qi2ZO9kCkLyF1iO8bsSVvAKR+7vNSK66O7ujuyn7gLz+M7A7W+YCPYooDzvmA7I72QKQPBfRQ7wSEkO4DQQdPJvwFjyZIcy8uAhmOsPoHLwP1uK52klQPBLDHbxxIWE8prXYPNCl+Tx764y6powIPV5DzrzfTBa79WYJvag4+TsaBb+8ysUNOyn7gDyBoig8BnZRvIXWI7uJCh88eYnXPJi0tjyNPho7OgpJvO5rqLzaIAC86PtMvBaKgzywM1q8LQmnu59CArq0PgU95J4BPNwYGz1pcqo7eRzCvGEwCb24coA8N4coPFEc17uj45K8OPS9u9XOlDwEzVu8gIQ4PHC8MDz4w1S8OgpJPEbt+jzchbC80S0EPI2FWjx9Kmg8WD0NOgYJvLkeps887HMNO1V2p7qOXAq8LBEMO4OaQ7zviRi9jNT/u8C0IbyRkAU8BS8RPaKBXTxV4zw8O06OOylolrmkl+g7T+B2vOCT1juKvnS8hJLeu29Pm7xVvWe8jNT/u3Xoxjw++n68f/myOzLIJ7vEnHK7H1eqO2z1SjxOVfE7z/GjvAqEd7xUWDe7sDNaPJEma7rLvSi8W+YCvUkACzzXDfA7FChOu5JqsLyY2gs8YKUDPN/fAL3fdWY8ZCA/OyG82jx0XcG8OgpJOee0DLzbq4U8qenTO6Zms7wHupa87HONPB71dLuaGec6KSTRuw9pTbuTsfA56+gHPN2jIDwpaBa7y1ATPKAcrTxx2iC6GyMvOug/EjwdG8o8q7geu9pJ0Ls4zmi7X87TvGq5arzl5UE902xfPI2rr7pS84Y8y1CTvHx2ErzQpfm8yGPYvHckJ7ynF4472iP7uk/g9juhOh07k4ggPKmiEzwXgh66JujwOWHGbrwuJ5e6637tu2h6j7sIAdc7/RVAO3CWWzyvWa88CEWcO3x2Ejxtz/U7zbVDvPc4z7xkRhS6mNqLuw/8NzzMl1M8kHIVuxz92Two3ZA8tYVFPRu2mTsF60u8bPVKPGB/LjzgJsG79WYJvEGYlDto56S5RBs1O16wYzwnLDa8vrwGvVkXuDxQJLw7Juhwu92joLxFYnW83X3LO+LPtrsKhPe7vZ6WvCe/IL1rRPC8mAPcvLGVj7zem7u7nS9yvPGfI70gCAW9CWMMPArQobzgaga8hvSTu5UxFr3JFLO8OlluPAG30DycVUc8EBoovGwbILxGVxU8cSHhO4zUf7uLSfq8aOckPN8I0btNDrE8VpQXuqRQqDwp+4C75nDHu70xAT0iACA7rqhUPEHnuTwOcTK8YVnZO8Ok1zv4Vr86WsgSvBtyVLzJ7t07LBEMvH9mSLy2o7U5OsMIPIMHWTsZ5048kC7QPAPzsDxYPY28V7KHOYyNPz0++n68z/GjPHC8MLlzgxa8mSHMPG/iBT21NqA8BuNmvA2XB725aps5xAaNvC8BQjzOZp48q3RZuiP4Ortwllu8nXO3vAqEdzrtlPi6w+icu8oyozvA+2G8+XSvOFxLs7w6w4i5uh7xOD5kmTyxUUq8wzfCu3Eh4byOXIq85AuXOcMRbbyJ5Em93TYLvV5DTrztus079E
gZvMGsPDymtVi8GMleO5dPhryjMji74mKhO/olCr3aIAC7ye5dPN9MljwF60s8eNUBPUhPsDsfgPo7X4eTO/mdfzvem7u7jRhFvG8L1rw7KLm84LmrPKRQqDwx0Aw9P4KJuzVLSDsJ+XE8W+l9OXjVAbxE12+7i29POzkSLrzG/qe88VteuT9cNDrKnzg8B7qWO7dUELxbLcM7ysWNOxyQRDwdrjQ8aFS6PKVIw7sGdtE8U+shPNtnQDsfgHq8nS9yu7ebULuwoG88cxYBPJXHezytQ6Q8vKl2vGz1SrsvlCw702zfPCQWKzx2c0w7URzXu2tEcLpXSO07cbRLvIHL+Lv1QDS8JceFOotJery79aC8HUEfPCLaSrwkPAC9YKWDu23PdbnNSC49q7iePHvrDDwFfrY82W+lO8nu3TsXgp48lymxvO+JmLoeXw89c/CruqQq07us/168dKGGOu8ffjszeYK7ZEYUvdpJ0Lolg8A8YKUDu70LrLwkqZU7x68CvZFMQDx+tW07iQqfvDvkc7wGCTw8OlnuvAxTQjz9O5W8ULemPFEc1zwo3RC8mAPcOggBV7thMAk8mANcurZ9YLyNhdo8H1eqvJG5VTy9NHw8FxWJu4gz77pCcj+7uf2FvE8GzDyXKbG8kxuLO/Gfo7tvT5s84+0mvOe0DDywoO+7ty47u2c2yrplZIS8TPDAPKAcLTyfkSe7TcrruyjdkLyVxIC8DHkXvYMtLjugRf075AuXvF5pIzz0SJm7Hjk6POxzDTzHia08zfmIu5wOhzxG7fo83RC2vM8a9Lv2h3Q8sVFKPG05kLzAtKE8Pvr+uryp9rpP4PY7MB8yvABwkLz4Vr87mhnnOtkCkDvG/qe7gaIoPHOs5jyzIJU8v3DcO50vcrwKPbe8xif4PLU2ILt/jB28mj+8OqySSbxduEg8uEwrvI6jSru8E5G7k7HwvO5rKLwYyd465imHOtSwpDs5pRi8prXYvHo6MjqGHeS8BKSLO9YV1Tu8gCa8zUiuuxsjLzv3Eno8sVHKuk9KETygr5e71w3wu9RDjzkRy4K8EWFovH7bwruybzo9BpwmPNczxTuVxAC8PUYpvDUECL1XH528pJfougVYYToMeZc7kHKVPCnVKzu0PoU8/jOwuvEyDjyI7C686D+SvAwMgrouugG8dXuxPNX35LvxW968M6JSO8yXUzs1cZ08s7Z6Ow/8tztsiDW8kxuLu7HktLwSw528JKmVOmhUOjzrfm084GqGvAwMgrseXw883RC2O2VkBDsYXMm8JYNAOoIPvry2EMs8bRM7vC4nlztFYnU8thDLvH5I2Dw+0S685imHPNcN8DywM1o8mLS2O6Pjkrq5Jta7jCCquSVdazz46Sm6cSFhO2uuCjz+oEW8tqO1vKcXjryONrU6xU3NvD/vHrwrOtw75KH8PKJYjTxPShG9wdKRvGA76byl2y0844ARPFProbzFc6K6AbdQvEMjmjpgpQM8s/q/vMevgrsamKk8Sz9mPNRDD7qmtdg8kSZrPvVmCbywCoo71hXVPDFm8jwFWGE8BetLPDRTrbtBweQ7UCS8O89eObyNhdq7GMlevBeCnjvnjrc768IyPAeUwbxlZ/+84ovxvOxzjbzRLYQ7/1GgvKHNh7wD87C8ukRGPCMekDtQkVG8z4SOu32UAj29npa6IbxavJhHobt+tW07F9HDPFo1qLwzolK85yEiPWq5ajy9MQE905I0uxAaqLwK0KG8Jg5Gu23P9TstxWE7BycsPI2F2rv7sA+94ADsO8ey/TyIWcS7oEV9PImdiTzIOgg9aS5lPMu9qLy4ucA8ZlyfPPtsSrza+iq8c6xmu9MlHz2QLtC7FUa+POo3rbygRX27/jMwvWr9L7sHupa6RNdvvAvukbwmobC7LrqBu2HG7jrwgbO8AUo7vLICJTxUxcw73X3LPGku5TxI4pq8iigPvJOIILxNyuu8S6kAvUSuH700Uy08XJpYvI6jSrwT4Y28OlnuOzowHrwcau+5X85TPP6gRTwyWxI8Nmm4Ow3AVzxVvee7AUo7vFZuwrvdNos8l7wbPKrAg7t3TXe8baYlPDdD47tUMmI87HONOw5xsrt9lAK92iCAvMevArotxeE7h6jpvAG30Du79aC8ApH7uYjsrjvcX1u8l5bGOmz1Srwxqje8I/g6PHe3kbrRMH+8P++eu30qaDx4/lE8MT0iverKlzpunkA79xL6POj7TDzAIbc7fSpoPKPjkjvJFDM7nHucu5JqsLt9vVI7piJuvD7RLjzaI3u84LkrvGTc+bweps88Ru36vBD00rvuayg6NxoTvfmaBLpANl+8PG/5u2yINT3D6Jy7LBEMvMsqvrzoaOI7Im01uzN5grxCBao75eXBPMYn+LtRQiy9k7HwvHivLL789088ehRdPOSegbwi2ko8+9lfO4ZhKTy+vIY8ctI7O3jY/Dux5LS6z/GjvJqDAbxrrgq9MWbyuyQ8gLzviRi8ygzOtjVxHT15YIc8hLgzPbMglbvWWZo7zmYeux9XqjtGE1C87muoO4kKHz0kPAC7Qee5vNNsX7za+qq8UdUWPXMWgTyEkt48HvX0u3yfYjvfTBa9/jMwvNSwpDwhvNo8WjUoPIhZRDzYUTW8e8W3vGJ3ybs6MJ4818Yvu2ASmTz9FUC83PJFPHtYorvO0zO7jRjFumHG7jzHia08tqO1u6/smTyM+lS7JNLlu1iqIrzkoXy8RTmlu0naNbzZmPW7DAwCumyINbxG7fo7fkhYvGOVuTyMICq8HvX0vAo9N7xWJwK9ZCC/O24xqzu9nha9xMLHPK2K5DodrjS8sAqKvIzUfzzpGT08cdogPHPwK7z+MzA8f4wdvIE1Ezzp8+e8U+uhvG7ElTwVRr68pFCou35urbwJY4w7qDh5PCTS5TsV2ag8pCpTvA5LXbxFOSU6uN+VPAljjLwrzca6fQEYPfFbXrz5dK88vTT8O34fCD2kUKi8t1QQvD0g1DtpLuU85463PL0xgTx0ytY8RleVuw3AV7wHuha7aFS6ukIFKj1/Zkg82iP7vOldAjyVMZa7pCpTOjaPjb2aGee8qpouOXrNnDwIRRw8zNsYPUOQr7tHMcA7wPthujowHjxQt6Y7PqtZvC/b7DuyAqW7l08GPdfGr7xQt6Y8NxqTvJ9r0rvTJZ88uf0FuyMeELzPy048hxKEvKu4nrxUWDc8AY6AvGtE8DxTp1w77ti9u6jxuLxKtOC8S9JQu0K2BLyEuDO6UmCcPOBqBr1iUXQ8yGNYPDEXzbzleKw8KfuAPIq+9LuJnQm98KeIuzW4XTzG/ic7uh7xPEA23zuixaK83sGQvJaeK71KR8u8fHaSPG05kDubXSy74vWLPHCW2zwb32m7vKn2O7XJCjxksyk7KWgWvbgI5jwBjgC8U6dcPM/LTryhp7I8AAb2PPwdpTsnUou8jja1PJ+RJ7wOS908P+8evZ4kkjwFLxG8GXq5vNaoPzxG7Xo8TlXxvGhUurw0wMK6M+YXvKZmszzgaoY8cxYBv
Pl0rztHxCq8Z8k0va/sGbyzIJU857SMPF5DTrw/gok7ipWkPDpZbrrHiS27QnK/PEhPsDxLqQC9j1SlvM7Ts70J+fE8nKTsu7qIi7wp1as8uEyruZmOYTx+tW282o0VvLONKrt2maG8m8pBu/FbXryqw348K83GvG3P9bvmKQc9d5G8PM5Aybsc/Vm7OlnuOp4kkrye4Ey8wLShO0fEKry6HvE7f4wdvZAuULsQh725LrqBvLjflTtmXB+9VicCvEbt+jzrL8g7NUvIux7MpDuONrU8woZnvBLDnTx42Py861WdvNlvpTzguSs8GedOu1zenTqtQyQ76GjiOrZ94DwQ9FI8lDz2O7lqmzzF4Le8jPpUu/VmibzntIy8mY5hvJCbZbphxm67vO07veMW9zzZAhC8/sYaPdQdurv1/G47pFAoOxHLgrwBjgC8sAqKuwjYhryWnqu7AkJWvG05EDyRTMA8mY7hPOxNuLh6OjK8YnfJPEltIL3Rmhk93TYLPXXC8TuRJuu81ffkOXxQvTxhnR48frXtOurKlzrM25i7UdWWOyjdEL3hRDG8DktdO7wTkTxtORA8RTklOy5Q57tDkK86ULemPHsyTTzgJkE8635tPNXOlLtduEg7W+aCuxPhjTp5iVe8/PfPvCqvVjyLSXo78wRUOuJioTwFLxG7E3dzPBtJBL3Hsv04v3DcPGK7jjtrrgq9qaITPcPoHLxIvMU7+ZoEPRJWiLyF1qM7E3fzOEIFqjttz3W8XrDjPOho4rsM5qw8AvuVu/fLubsX0cO8RhPQOaySSTjuHIM77kVTPLpERrxk3Hk9JDwAPGqQmrt8n2I8YcZuPLU2oDzPXrk8oK8XPO26TbzA++G8fJ9iu5o/vDuvf4S8ODgDvTFm8rtDI5q7Nd6yvIeoabzBP6e8iMbZPOtVnTw6WW48GXq5PLxa0TuAqo28vKl2vNbsBDwJY4w7yDqIvAwP/bys/948frVtuxorlLyLs5S8SSlbO1OnXDsk0mW7fSrou68V6rtHxCo8CzXSPFvmAjvVO6q8UGgBvfmahDsI2IY8BVjhvAljjLsiR+C8", + } + ], + "model": "text-embedding-ada-002-v2", + "usage": {"prompt_tokens": 4, "total_tokens": 4}, + }, + ], + "12833": [ + { + "content-type": "application/json", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "26", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "3000", + "x-ratelimit-limit-tokens": "1000000", + "x-ratelimit-remaining-requests": "2999", + "x-ratelimit-remaining-tokens": "999994", + "x-ratelimit-reset-requests": "20ms", + "x-ratelimit-reset-tokens": "0s", + "x-request-id": "d5d71019880e25a94de58b927045a202", + }, + 200, + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": "d4ypOv2yiTxi17k673XCuxSAAb2qjsg8jxGuvNoQ1bs0Xby8L/2bvIAk6TxKUjU8L3UfvLozmrxa94a7e8TIvIoBED0Cw6c44Ih9PKFGi7wb6LC76DWUvFUqfjvmzQm8dAwXvOqNpbxEsgs8JhB8vHiksTv9sgk6ZX9Nu+aliLrQiA688+VbvI7Bqzvh2H+8IQBevICEZLyiDpE8jpmqvFw3ED0lIPU7f+RfPNVgMrxoJ+G8kyHMO6hOvzuKAZC8Yb+xvIoBkDwK89w8L3UfPGX30LyxHnm8znAGvHOUEzuyvn28v7u7PLmTFbz8moG8wSNGu2SPxrvnvZC8fIzOPPJFV7mh5o+76f0Zu7K+/bc2Zcu7oB6KPKxGVTyo/jw8/toKvM/oiTvdGGQ8a6fzO8VbZTt4fLC79RXsuwwj7bpPitQ86hUivVvnDb0zbbU8eFQvPfPlWzv2BXO8/8qRPPC1S7zg6Pi8etTBO9TArTyI+QC8Lb0SPSCYU7w6/WU8DmP2O0/a1jsJ29Q7/WKHvAC7mLvFu2C850WNO2aX1TrOqIC7GnAtu6hOv7o77Ww7L02ePM+Yh7tffyg7Mh0zPQRTMzwXyBm9FRCNu6tWTrwsLQc86DWUvL+7u7sGM8G8rp5mOwWTvDwlcPc7xGvevOb1ijsr3QQ81ni6vJsBf7wioGI8Ok3oumX30Dyhlo27eoS/O6UGp7rQ2JC8JDDuPINU+bxQyt08irGNu94Ia7wxjSc7bDf/PEXylDvO+IK8AUukujIdMzwxLaw84dh/vNfwvbvtDTg4LR2OPMW7YDt9fNU8Y3e+Ozld4TwHI8g81ti1u7vTHj30dec7GzgzvARTMzyNqSM8x5tuPJZR3Lw3pVQ6QzoIPXcErTwBSyQ7OM1VPD3Nerx4HLW8XheePNPQJr0l0PI7lDnUOtx437p8ZM08fzRiPNmoyrwt9Qy9c2ySvNqYUTwrBYY7703BPJUB2juPES47faRWPEYKHbzcKN08MMWhvKQ+oTts5/w8A4stPUmKr7p4VC+/VSr+u45xKTyT+cq6lqHePDgd2Duv7mi6ThLRPFkHADscULs8lMFQvJjR7jwu5ZO75d0CvO/FRDp3LK67DCNtO2M/xLtbD4+8mrF8u9TALb339fk6BZO8uzOVNjwYQB08k0nNuuoVojzO0IG8ZN/IvGoHbzwH+8a8g/T9OwLrqDzBw0o8xVtlPedtjjzQiA68MkU0POz1r7xF8hQ9IqBiO5sBf7zszS68FKgCvTDFITygfoW8ixkYPHVMILxqB288rp7mu4yRmzwhAF68PI3xO71jKjy/a7m7sR75OkXKkzyXQeO7JsD5uxtgNDyoTr+3JSB1PDPlOLxod+O7L02evKsuTTz9EoW8JhB8vGq3bDyU6VE6l0FjvDM1uzsKQ9+8dUygu0yCRTxbl4s8HfA/vKXeJbzrLSo8XP+VPGun8zzJe/y6lQFavKNOGr1+9Ng8uKMOvVvnDb13BC27751DO4F0azwc2Lc8XU+YPKFGC7zvdUK86j0jPHFkA7xCcgK9HrjFPIIEdzuKAZC8UbpkPB2gPbxnh1w8qCY+PNF4FbsjkOm7iRGJvNMwIj3XGL88r+5ovCylijuf3oC8Md2pvBQghjwOY3Y8IWDZvJKBxzzQ2JA8HFC7PP56j7yoJr48FrCRPGOfPzxcrxO8r47tO00iSjxHWp+8BMu2vOiFFr2o/ry7ZufXPHV0IbxSWuk7iHEEu0xaRDyUOVQ8qy7NPI5xKbtz9A45ny6DuvF9Ub
yj/he87xVHvAuT4Ttej6E8vJukvLw7KbxHqiG8eKQxu2S3x7rsHbG7T4rUOzaNTLzg6Pg69lV1OSGw27xFQpe7ZufXu/KV2buVsde82FjIO3OUEz270x68/8qRPFlXgrpet6K8qj7GOqv2UjyQAbW7iomMvDkNXzzCY8+8Bbs9vHRcGbwyfa47572QPFPqdLxzRJG8/lKOOnt0xrw17cc7TjpSOujVmDtcX5E8cWQDPTSFvbsDsy49i6GUPNZQubz9ioi66u2gPJEZvTz3RXy8CStXO9LgH7ykjqO8fOzJO44hpzu7gxw8RcoTPJWJVjyKsQ08rv5hPNgIRrx2FCY7FtiSu9kgTjxffyi8kjHFPERSkDy3OwS7aHfjvL0Dr7ulfiq7AAubPNLgHzw5Dd85RmoYOwszZry7gxw70uCfPLJuezy3swc76f0ZvOZVBrxsN3887EWyPFw3ED27qx28IqDivAAzHLwHS0k8qIY5PNLgH7yaEXg8M201vMJjzzwy9bG796X3PB2gPbznHYy7XU8YPF8vpjyISQO9dISaPMjbdzwDY6w8vqMzPHS8FDzRGBo9RfKUvL+TOjwe4Ea8HHg8vDr9ZTx2xKO8U0pwvAhj0Ts6/eU8kWk/Peu1pjwkgPC7Wh+IuustKjxc/xW8B5vLu2kX6Dt4fDC6p+a0vCsFhrz1Fey8DXNvvPiVfjylBqe8CBPPPFp/Azz96gO7iZkFPGRnxbuT0cm6uuOXvPVlbrzJK3o8HNi3PHG0hbw9HX2835j2vNUQMLoa+Km8ZafOu70rsLvSkB08VIp5PGGHN7zRyBe8BoNDPBWYiTz0JeW7fQRSu9OoJTxGapi7c/SOu/1ih7wFG7m8iEkDPHzsybuqPsa66A0TvRnQqDt0XBm8u6udPPQlZTwH08W8ps4svAszZrrEy1k8Q8KEvKy+WDq7IyE9lqFeO5ChOTwtvZK8ZLdHvNvYWrwPo389A2OsOwlT2LtZB4A818i8urdjBT1FohK829javJgx6rp6NL28oyaZvKcOtjkFazu7vJukPLwTKDwiUGC8oPaIOgNjrLppZ+q7RaKSO5EZvbxlH9I7kHm4Ow+jfzvQsA88a0d4vLq7lrs5vdw7t7OHPIvJlbyDpPs6jkkoPKOeHDsiUOC7M+U4PApD3zxs53w6XheePDa1zbrmpYi8sR55PKYerzzamFG8XDeQvNTALTyg9oi8sH50vAfTRTwB06C81EiqugdzSjx8FEu87B2xvGpXcbwt9Yy8lOlRPIAk6buVAVq8eFSvvIzhnbwtbZC84Oj4vKiGuTuwfnS7NK2+vAWTPL07Pe+8iPmAvJP5yrxFyhM8v7u7vHG0hbyrfs+8SbKwOw4T9DzdaGY8lWHVu4kRibwEyza7cYyEu0YyHjwCmya8L50gPIk5CjuZIfG77TU5PLybJDy7gxy8PI1xvMTL2TyirpU8a6dzPPGlUjxc15S8uNsIvEXylDz/QpW7CDtQvP2yiby+8zW7XIeSu6AeirxzlBO8UHrbvHucRzzRUJQ7i8mVu8fr8DwdyD68zziMvPOF4DyNMSC8qP48PKFGC7xHWh88z8CIPAC7mDyIIQI9gcRtu5Wx17yW8eC6fSzTvKKGlDw2Fck8XXcZvFkvgTzXGD88ddScu6t+T7pRCme85Y2AvPC1yzsaICu85qWIvBrAr7xyzA283HhfvJLhwjxhXza8RaKSu41ZobwXeBc8oW4MvEdan7xqV/G8ApsmvWtH+LsGC8A87B2xPNjgxDt3jCm8DCPtutIIIbyu/uE7CkPfO5ZR3LwPo387572QPBfImTxrp3M8HFC7PGjHZbrgOHs8vYsrvBWYCTwcADm8yXt8vF0nl7xn11687iXAPO79vjyPOa882fhMPDgdWLj+eg882hBVuerFnzvzNd676hUivC1FD7yu/mG5/MICPHr8QrynDja7zzgMPAVDOryorjo7IBBXOwcjSDynvjO8pd4lPRogK7yqZsc70gghOz3NertLur+8j7EyvNHIlzxdTxg8IEjRPP/KkTwLM2Y8ejQ9vJfh57vxzVO8U5ryPJQR0zqYMeq7pLaku6iGObxZBwC86j0jvM8Qi7ytXl28TyrZO3m8ObxsN/+71TixPPQl5bwKQ9+8iWELvF4/n7wurRk9LR2OO2eH3Dzn5ZE8FTgOPKG+DrsC6yg8C5PhuncsLrzt5bY8wotQPKpmR7o6/WW8l0FjPHFkAzzQiI68GYCmvNJonLuTcc48Xy+mO/BlybugHoo8ufOQvDa1zTzrBak7yNv3u3mUOLzxBc67WqeEvIsZGDyD9H28pm6xOw8D+zymbjE8nwYCvDjN1Tov1Rq7qcZCu/PlW7z1xek88vXUvH804jvtvbU7z+iJPJGRwLy9KzA89WXuO3FkgzyqPka8B9PFvJ/eALzsHbG4L/0bPKxGVTxH0iI8llHcvHMcELtrp/O7q6bQPDTVv7vI23e7pm6xvHskRLuwfvS8GagnvUaSGbzSkB081CCpu8W7YL2A1Ga8I0BnPPiVfruj/pc8IqDiuxwoujtypIw88kXXu/4qjbqISQO8o3abPAM7q7yCZHK7/8qRu8VbZbyTqcg7NP3Auwvj47yNMSA7UQpnPEmysDywLnI7uFOMOzkNXzwu5ZM8lJnPu+stqjszvbe8i8kVPdjgxLuOcSk8GiCru3KkDLu264E7cnyLvAkD1rsBgx48fXzVvEk6rTv/GpQ7/gIMO76jMzufBgK7COvNvNSYLLy2w4C8dAyXPL6jszt8PMy8lWFVPAFLpDsxtag8L3UfPNaguzqUOdS8MS2svJ8ug7rFW+W8o8YdPIsZGDwj8OQ8ujMaO3e0KrtLQry89NViu/56D72fBoK7koFHu7eLhro17Uc8vQMvPF4XHjvrBSk9wUvHugHToDwMg+i8NU3DOzud6jznRY28fvTYO0OKCrwHS8m8DcNxvEOKijwHI0i8WVcCvTBlpjyN+SU8qP48u14/n7zUSCq8lqHeuja1TTzFu2A8X38ovF8HpbzTMCK7eqxAvFr3hjvTMCK7GBgcu4NUebwA45k8HaA9vFlXArzO+II8mNHuvNx4Xzxx3IY7q6ZQvKfmtDzFW2W8HgjIO1Xae7xzRJE884XgvKNOmjy7gxw49HVnvP5SjrwYaB685lWGvBnQqLx11By7XScXvY6ZqrkC66i8grT0POgNkzulfqq8kZFAvJjR7ruhRgu8HWjDPNQgqbxdn5o53CjdvMg7czxpZ+o8ddScvP5SjrzvnUM8xvtpu+iFFjwKQ988r+5oPqBWhLzPOIw6FKgCPfZVdTw7PW+8kAE1PfC1y7svTR67LW2QPGBHrrybAX+8A7OuvIwJHzzQsI88DwN7u4npB72JwQa8iTkKveXdAr1kB0q71RCwuxbYkrugfoW8AsOnPKj+vDrDA9S4YG8vPCylijyYMWo79RXsvATLtrwc2Lc8DCPtPH0EUjpxPAI8S7q/PP0SBTyvjm08O51qPLc7hDtMqkY8ixkYvIyRm7yUOVQ8z+iJPLbrATysv
li8SYovPCQw7jytXl28SdqxOxWYCT1blws9S5I+vPyagTxsN3+7k9HJulqnhLsLM+Y6qNa7uy/9Gz2sHlQ85QWEPAg7ULynvjM8alfxvCAQVzqX4Wc88+XbOhXACrvqjSU8pm4xO7w7Kbymziy9CQNWvIpRkjzp/Rk8F1AWPS4NFT1Uivm7ilGSOjIdMzxff6i8qt5KvLoLGb1etyI8pBagPNmAybxgHy27rp7mO1rPBbwGW0I8LyUdPBxQuzx4VC8806glPKGWjTwtHY68XXcZO99I9LzoXRW8RgodvBSogjtIEqy8No1Mu0bim7sbELI8FKiCO30E0ryTqUi8JSB1vE0iSjyL8Ra81/C9O9jgxLsUSAc8cRQBO7ijjrqR8Ts7ZC9LuwBbHTx93FC7CzPmPExaxDunXri8ZR/SuhzYN7z2BXM7OB3YvJoReDyJmQU7xvvpPHh8sLtkB0o7rQ5bPKS2JLtRuuQ8IWDZvMDTwzu4e408l5FlvCx9CbyjTho8tzuEvEaSGb0c2Lc7itmOvCEA3ryVYVW8/YoIvaP+l7xaH4i4W5cLuwCTlzwGq0Q7ztABvTT9wLt8FMs3XU+YPP2KCLxyzI076U0cPFkHgLzrVau80RiavEiaKL5b5w0909Cmu19/qLy6u5Y87b01PLybpDzb2Fo88GXJvOA4e7qMuZw8IlDgvNIIobx7TMW8RNoMvFXae7xjx0C8cgQIPDzdcz1D6gU8icGGPJNxzrwNw3E8gcRtvMTLWTtQyl28N6VUO8/oCT3u/b68mNFuOzNttbxOEtE79gVzOkJyAjyOwau75lWGu9doQTuNWaG8NwVQvLezhzxff6g8HRhBPJWJ1juNWaG8iZkFusW74LvEG1w8txODPKVWKT0USIe72ajKPGjH5bynXjg87M2uPK5O5LqDpHs8GnAtvFrPhTzeWG28Yk89PC29EjxZL4G8BoPDPHrUwbs0hb268h3WOqy+WDuM4R08SdoxvEtCPDxTmvK7FtgSvdx4Xzv0dee8x0tsPEoqNDy8myS9LjWWPDV1xLygfgW8TsLOu+wdsTozlba8vJskvAEjo7xKojc8mDHqOwEjozsw7SK8SYqvvNn4zDxkB0q8Siq0vDa1zbvEG9w7vHMjPAfTxTtsN/85a/f1uzgd2DvgiP07CQNWvDzdczxDEgc8IMBUPMMD1DuPYTA8jumsPB4wybtkL8s6rQ5bvGTfyDwCm6Y8Q+oFPSJQ4LsAkxc8Ok1ovIP0fbygzoc8QxKHO7ezBz3eqO+56A0TvdU4Mbu4e4271qA7u5HJur3WKDi8dsQjO58ugzsypS88ursWPXWsG7upFsU8pS4oPNYouDtiT728ciwJvFvnjbxaHwi8FmCPPP4CjLyyvn08Vdr7vB+ozLxk38g896V3vI7Bq7wDAzE72LjDvGRnxbv3RXw76XWdvI2BIjwIE888AnOlOeCIfby9iyu8AwMxPMEjRrxgz6q7qNa7t4yRG70rPYA8dfwdPIj5AL0f0E08IEjRu9x4Xzv2tfC85qUIPC8lnTsDi628tusBPMnLfrxPKtm6icEGvUQCjryv7ui8ufOQO9WIszzgiP264Oj4O0tCvDu9s6y7dUwgvKiuurmqBsy7Snq2vO1tszzltYG7u6udO6SOo7wmYP47L3WfO68+a7v/8hK8S/I5PbdjBTzwjUo8f+TfvHVMoLs3fdO8C+PjvBs4MzyWoV677KUtvAGrn7yJEYm7gcTtu3lENrsrBYY8N1VSu4tBGTscALm7wuvLvLcTAzyjxp08aCfhupqxfDzWALc7pLaktxNYgLzEy1m70NgQPbzrpjvZqEq8t4sGvJdBY72QUbc8Ms2wvDXtx7yBdGs7l0FjvCTga7v8woK7icGGvDJFtLt3LK47MMUhvAH7oTtx3IY7dmQove9NQTxKKjQ9MS0sPMITTTwIi1I8D6N/vK/uaLxcXxE8M221PN0Y5DtnN9o6O53qvDLNsDzHS2w7UvrtvNZ4ujzFW+W8jOEdPCAQ1zywLnK8cqQMPL/jPDt1rBs9clSKvBs4M7mihpS8uCuLvOdtjjx6rMC5DCNtvEQqjzinXri56F0VO+rtID2lLqg7FXAIvFz/lTxOElG87PWvu0PChDqMaZq8fBRLvGtH+Dum9q060LAPvSBIUT23iwY8vWOqPNAoEzxg96s7LZWRO9fIPLzQiA68krnBu1rPBb0szYu8MzW7ux8gULxc15Q7LfWMPJ/egDxzlBM7YJcwO4kRCb3mzYk8Y+/BPNT4pzwjkGm8ojaSPBgYHD0PA3s8RAIOu87QgbtN0se6TOJAPJkh8bz+Agy72ahKvP/KET3mVQa6Bbu9u8iLdTt7xMg4W5eLPBQghjxfL6Y3ZafOOVJaabyMCZ+8GTAkvEPCBLyJEYm850UNvakWRTx11Bw9ob4OPHcErTvdGOQ8H9BNO3VMoLxUOne7NwVQPJWxV7wBqx+9uAMKPQZbwjrBm8k8Q8IEPeA4+7wZgKY8xvvpOzBlJjm4ow68UbrkOzhtWrw2Zcu7W2+KPIvxlrz0dWe8ulsbvMRr3rrXyDy8JDDuPFlXAjvceF89igGQOgmz07sv1Zo8fkRbPI7BqzyaYfo8kUE+Otx437xrR/i7o/6XPP8aFDy+ozO8arfsvEVCl7wX8Bq7FoiQvBfwmrn/8pK7AnOlPBWYiTweMMk7ASOjPFxfkbssfYm8qCY+uwSjNTxSWmk87M2uu/9CFb2nlrI8DCPtu9oQ1bySgce5gcRtPBhAHbvmVYa8FEiHO/4CDDzUcCu7zvgCu/OF4Lxdx5u8kllGvU0iSjvPmIc80VAUvAvjYzy5a5S7", + } + ], + "model": "text-embedding-ada-002-v2", + "usage": {"prompt_tokens": 5, "total_tokens": 5}, + }, + ], +} + + +@pytest.fixture(scope="session") +def simple_get(openai_version, extract_shortened_prompt): + def _simple_get(self): + content_len = int(self.headers.get("content-length")) + content = json.loads(self.rfile.read(content_len).decode("utf-8")) + stream = content.get("stream", False) + prompt = extract_shortened_prompt(content) + if not prompt: + self.send_response(500) + self.end_headers() + self.wfile.write("Could not parse prompt.".encode("utf-8")) + return + + headers, response = ({}, "") + + mocked_responses = RESPONSES_V1 + if stream: + mocked_responses = 
STREAMED_RESPONSES_V1
+
+        for k, v in mocked_responses.items():
+            if prompt.startswith(k):
+                headers, status_code, response = v
+                break
+        else:  # No mocked response matched the prompt.
+            self.send_response(500)
+            self.end_headers()
+            self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8"))
+            return
+
+        # Send response code
+        self.send_response(status_code)
+
+        # Send headers
+        for k, v in headers.items():
+            self.send_header(k, v)
+        self.end_headers()
+
+        # Send response body
+        if stream and status_code < 400:
+            for resp in response:
+                data = json.dumps(resp).encode("utf-8")
+                if prompt == "Stream parsing error.":
+                    # Force a parsing error by writing an invalid streamed response.
+                    self.wfile.write(b"data: %s" % data)
+                else:
+                    self.wfile.write(b"data: %s\n\n" % data)
+        else:
+            self.wfile.write(json.dumps(response).encode("utf-8"))
+        return
+
+    return _simple_get
+
+
+@pytest.fixture(scope="session")
+def MockExternalOpenAIServer(simple_get):
+    class _MockExternalOpenAIServer(MockExternalHTTPServer):
+        # To use this class in a test, one needs to start and stop this server
+        # before and after making requests to the test app that makes the
+        # external calls.
+
+        def __init__(self, handler=simple_get, port=None, *args, **kwargs):
+            super(_MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs)
+
+    return _MockExternalOpenAIServer
+
+
+@pytest.fixture(scope="session")
+def extract_shortened_prompt(openai_version):
+    def _extract_shortened_prompt(content):
+        _input = content.get("input", None)
+        prompt = (_input and str(_input[0][0])) or content.get("messages")[0]["content"]
+        return prompt
+
+    return _extract_shortened_prompt
+
+
+def get_openai_version():
+    # Import OpenAI so that get_package_version_tuple can capture the version
+    # from the system module. OpenAI does not have a package version in v0.
+    import openai  # noqa: F401; pylint: disable=W0611
+
+    return get_package_version_tuple("openai")
+
+
+@pytest.fixture(scope="session")
+def openai_version():
+    return get_openai_version()
+
+
+if __name__ == "__main__":
+    with MockExternalOpenAIServer() as server:
+        print("MockExternalOpenAIServer serving on port %s" % str(server.port))
+        while True:
+            pass  # Serve forever
diff --git a/tests/mlmodel_langchain/conftest.py b/tests/mlmodel_langchain/conftest.py
new file mode 100644
index 000000000..6012e67fc
--- /dev/null
+++ b/tests/mlmodel_langchain/conftest.py
@@ -0,0 +1,237 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
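+
+# This conftest wires the LangChain test suite either to the mocked OpenAI
+# backend defined in _mock_external_openai_server.py (the default) or, when
+# NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES is set, to the real OpenAI API,
+# recording its responses to an audit log that can later be used for mocking.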
+
+import json
+import os
+
+import pytest
+from _mock_external_openai_server import (  # noqa: F401; pylint: disable=W0611
+    MockExternalOpenAIServer,
+    extract_shortened_prompt,
+    get_openai_version,
+    openai_version,
+    simple_get,
+)
+from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from testing_support.fixture.event_loop import (  # noqa: F401; pylint: disable=W0611
+    event_loop as loop,
+)
+from testing_support.fixtures import (  # noqa: F401, pylint: disable=W0611
+    collector_agent_registration_fixture,
+    collector_available_fixture,
+    override_application_settings,
+)
+
+from newrelic.api.transaction import current_transaction
+from newrelic.common.object_wrapper import ObjectProxy, wrap_function_wrapper
+from newrelic.common.signature import bind_args
+
+_default_settings = {
+    "transaction_tracer.explain_threshold": 0.0,
+    "transaction_tracer.transaction_threshold": 0.0,
+    "transaction_tracer.stack_trace_threshold": 0.0,
+    "debug.log_data_collector_payloads": True,
+    "debug.record_transaction_failure": True,
+    "ai_monitoring.enabled": True,
+}
+
+collector_agent_registration = collector_agent_registration_fixture(
+    app_name="Python Agent Test (mlmodel_langchain)",
+    default_settings=_default_settings,
+    linked_applications=["Python Agent Test (mlmodel_langchain)"],
+)
+
+
+OPENAI_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "openai_audit.log")
+OPENAI_AUDIT_LOG_CONTENTS = {}
+# Intercept outgoing requests and log to file for mocking
+RECORDED_HEADERS = set(["x-request-id", "content-type"])
+
+
+@pytest.fixture(scope="session")
+def openai_clients(openai_version, MockExternalOpenAIServer):  # noqa: F811
+    """
+    Configure and return the ChatOpenAI and OpenAIEmbeddings clients, pointed either
+    at the mocked OpenAI server or, when recording responses, at the real backend.
+    """
+    from newrelic.core.config import _environ_as_bool
+
+    if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False):
+        with MockExternalOpenAIServer() as server:
+            chat = ChatOpenAI(
+                base_url="http://localhost:%d" % server.port,
+                api_key="NOT-A-REAL-SECRET",
+            )
+            embeddings = OpenAIEmbeddings(
+                openai_api_key="NOT-A-REAL-SECRET", openai_api_base="http://localhost:%d" % server.port
+            )
+            yield chat, embeddings
+    else:
+        openai_api_key = os.environ.get("OPENAI_API_KEY")
+        if not openai_api_key:
+            raise RuntimeError("OPENAI_API_KEY environment variable required.")
+        chat = ChatOpenAI(
+            api_key=openai_api_key,
+        )
+        embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
+        yield chat, embeddings
+
+
+@pytest.fixture(scope="session")
+def embedding_openai_client(openai_clients):
+    _, embedding_client = openai_clients
+    return embedding_client
+
+
+@pytest.fixture(scope="session")
+def chat_openai_client(openai_clients):
+    chat_client, _ = openai_clients
+    return chat_client
+
+
+@pytest.fixture(autouse=True, scope="session")
+def openai_server(
+    openai_version,  # noqa: F811
+    openai_clients,
+    wrap_httpx_client_send,
+    wrap_stream_iter_events,
+):
+    """
+    This fixture will either create a mocked backend for testing purposes, or will
+    set up an audit log file to log responses of the real OpenAI backend to a file.
+    The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES=1 as
+    an environment variable to run using the real OpenAI backend.
+    """
+    from newrelic.core.config import _environ_as_bool
+
+    if _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False):
+        wrap_function_wrapper("httpx._client", "Client.send", wrap_httpx_client_send)
+        wrap_function_wrapper(
+            "openai._streaming",
+            "Stream._iter_events",
+            wrap_stream_iter_events,
+        )
+        yield  # Run tests
+        # Write responses to audit log
+        with open(OPENAI_AUDIT_LOG_FILE, "w") as audit_log_fp:
+            json.dump(OPENAI_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4)
+    else:
+        # We are mocking openai responses so we don't need to do anything in this case.
+        yield
+
+
+@pytest.fixture(scope="session")
+def wrap_httpx_client_send(extract_shortened_prompt):  # noqa: F811
+    def _wrap_httpx_client_send(wrapped, instance, args, kwargs):
+        bound_args = bind_args(wrapped, args, kwargs)
+        stream = bound_args.get("stream", False)
+        request = bound_args["request"]
+        if not request:
+            return wrapped(*args, **kwargs)
+
+        params = json.loads(request.content.decode("utf-8"))
+        prompt = extract_shortened_prompt(params)
+
+        # Send request
+        response = wrapped(*args, **kwargs)
+
+        if response.status_code >= 400 or response.status_code < 200:
+            prompt = "error"
+
+        rheaders = response.headers
+
+        headers = dict(
+            filter(
+                lambda k: k[0].lower() in RECORDED_HEADERS
+                or k[0].lower().startswith("openai")
+                or k[0].lower().startswith("x-ratelimit"),
+                rheaders.items(),
+            )
+        )
+        # Append response data to log
+        if stream:
+            OPENAI_AUDIT_LOG_CONTENTS[prompt] = [headers, response.status_code, []]
+            if prompt == "error":
+                OPENAI_AUDIT_LOG_CONTENTS[prompt][2] = json.loads(response.read())
+        else:
+            body = json.loads(response.content.decode("utf-8"))
+            OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, response.status_code, body
+        return response
+
+    return _wrap_httpx_client_send
+
+
+@pytest.fixture(scope="session")
+def generator_proxy(openai_version):
+    class GeneratorProxy(ObjectProxy):
+        def __init__(self, wrapped):
+            super(GeneratorProxy, self).__init__(wrapped)
+
+        def __iter__(self):
+            return self
+
+        # Make this proxy a pass-through to our instrumentation's proxy by
+        # forwarding attribute get and set calls to the wrapped object.
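+        # (The wrapped object here is the stream proxy produced by the agent's
+        # OpenAI instrumentation, so delegating keeps attributes it sets, such
+        # as _nr_response_headers, reachable from this recording proxy. This
+        # reading is inferred from how __next__ uses them below.)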
+        def __getattr__(self, attr):
+            return self.__wrapped__.__getattr__(attr)
+
+        def __setattr__(self, attr, value):
+            return self.__wrapped__.__setattr__(attr, value)
+
+        def __next__(self):
+            transaction = current_transaction()
+            if not transaction:
+                return self.__wrapped__.__next__()
+
+            return_val = self.__wrapped__.__next__()
+            if return_val:
+                # Attach the streamed chunk to the most recently logged request.
+                prompt = list(OPENAI_AUDIT_LOG_CONTENTS)[-1]
+                if openai_version < (1, 0):
+                    headers = dict(
+                        filter(
+                            lambda k: k[0].lower() in RECORDED_HEADERS
+                            or k[0].lower().startswith("openai")
+                            or k[0].lower().startswith("x-ratelimit"),
+                            return_val._nr_response_headers.items(),
+                        )
+                    )
+                    OPENAI_AUDIT_LOG_CONTENTS[prompt][0] = headers
+                    OPENAI_AUDIT_LOG_CONTENTS[prompt][2].append(return_val.to_dict_recursive())
+                else:
+                    if not getattr(return_val, "data", "").startswith("[DONE]"):
+                        OPENAI_AUDIT_LOG_CONTENTS[prompt][2].append(return_val.json())
+            return return_val
+
+        def close(self):
+            return super(GeneratorProxy, self).close()
+
+    return GeneratorProxy
+
+
+@pytest.fixture(scope="session")
+def wrap_stream_iter_events(generator_proxy):
+    def _wrap_stream_iter_events(wrapped, instance, args, kwargs):
+        transaction = current_transaction()
+
+        if not transaction:
+            return wrapped(*args, **kwargs)
+
+        return_val = wrapped(*args, **kwargs)
+        return generator_proxy(return_val)
+
+    return _wrap_stream_iter_events
diff --git a/tests/mlmodel_langchain/hello.pdf b/tests/mlmodel_langchain/hello.pdf
new file mode 100644
index 000000000..4eb6f2ac5
Binary files /dev/null and b/tests/mlmodel_langchain/hello.pdf differ
diff --git a/tests/mlmodel_langchain/test_agent.py b/tests/mlmodel_langchain/test_agent.py
new file mode 100644
index 000000000..1aa501567
--- /dev/null
+++ b/tests/mlmodel_langchain/test_agent.py
@@ -0,0 +1,93 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
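+
+# These tests cover the LangChain agent instrumentation: each AgentExecutor
+# invoke()/ainvoke() call should produce one scoped and one rollup
+# Llm/agent/Langchain/<method> metric, the Langchain supportability metric,
+# and the "llm" agent attribute on the transaction (see the validators below).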
+ +import langchain +import pytest +from langchain.agents import AgentExecutor, create_openai_functions_agent +from langchain.prompts import ChatPromptTemplate +from langchain.tools import tool +from langchain_core.prompts import MessagesPlaceholder +from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task + + +@pytest.fixture +def tools(): + @tool + def multi_arg_tool(first_num, second_num): + """A test tool that adds two integers together""" + return first_num + second_num + + return [multi_arg_tool] + + +@pytest.fixture +def prompt(): + return ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are a world class algorithm for extracting information in structured formats.", + ), + ( + "human", + "Use the given format to extract information from the following input: {input}", + ), + ("human", "Tip: Make sure to answer in the correct format"), + MessagesPlaceholder( + variable_name="agent_scratchpad", + ), + ] + ) + + +@reset_core_stats_engine() +@validate_transaction_metrics( + name="test_agent:test_sync_agent", + scoped_metrics=[("Llm/agent/Langchain/invoke", 1)], + rollup_metrics=[("Llm/agent/Langchain/invoke", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_sync_agent(chat_openai_client, tools, prompt): + agent = create_openai_functions_agent(chat_openai_client, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) + response = agent_executor.invoke({"input": "Hello, world"}) + assert response + + +@reset_core_stats_engine() +@validate_transaction_metrics( + name="test_agent:test_async_agent", + scoped_metrics=[("Llm/agent/Langchain/ainvoke", 1)], + rollup_metrics=[("Llm/agent/Langchain/ainvoke", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_async_agent(loop, chat_openai_client, tools, prompt): + agent = create_openai_functions_agent(chat_openai_client, tools, prompt) + agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) + loop.run_until_complete(agent_executor.ainvoke({"input": "Hello, world"})) diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py new file mode 100644 index 000000000..f6f877d63 --- /dev/null +++ b/tests/mlmodel_langchain/test_chain.py @@ -0,0 +1,1647 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
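+
+# The chat_completion_recorded_events_* lists below are (event_type,
+# expected_attributes) pairs fed to validate_custom_events. Judging by how they
+# are used, a value of None (id, span_id, request_id, duration, ...) marks an
+# attribute that varies from run to run and is only checked for presence, while
+# concrete values must match exactly.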
+ +import asyncio +import uuid + +import langchain +import openai +import pytest +from langchain.chains.openai_functions import ( + create_structured_output_chain, + create_structured_output_runnable, +) +from langchain.prompts import ChatPromptTemplate +from langchain.schema import BaseOutputParser +from mock import patch +from testing_support.fixtures import ( + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_sans_content, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_transaction_error_event_count import ( + validate_transaction_error_event_count, +) +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + + +chat_completion_recorded_events_invoke_langchain_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 1, + "metadata.id": "123", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'no-exist': 'Sally is 13'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), +] + +chat_completion_recorded_events_runnable_invoke_openai_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 1, + "metadata.id": "123", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'input': 'Sally is 13'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), +] +chat_completion_recorded_events_runnable_invoke = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 2, + "metadata.id": "123", + "tags": "['bar']", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + 
"llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'input': 'Sally is 13'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'name': 'Sally', 'age': 13}", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + }, + ), +] +chat_completion_recorded_events_invoke = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 2, + "metadata.id": "123", + "tags": "['bar']", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'input': 'Sally is 13'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'function': {'name': 'Sally', 'age': 13}}", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + }, + ), +] +chat_completion_recorded_events_runnable_invoke_no_metadata_or_tags = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'input': 'Sally is 13'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'name': 'Sally', 'age': 13}", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + }, + ), +] +chat_completion_recorded_events_invoke_no_metadata_or_tags = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": 
"bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'input': 'Sally is 13'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'function': {'name': 'Sally', 'age': 13}}", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + }, + ), +] + +chat_completion_recorded_events_list_response = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 2, + "metadata.id": "123", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "{'text': 'colors'}", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "red", + "completion_id": None, + "sequence": 1, + "vendor": "langchain", + "ingest_source": "Python", + "is_response": True, + "virtual_llm": True, + }, + ), +] + +chat_completion_recorded_events_error_in_openai = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 1, + "metadata.id": "123", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "openai failure", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), +] + +chat_completion_recorded_events_error_in_langchain = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + "request_id": None, + "duration": None, + "response.number_of_messages": 1, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "colors", + "completion_id": None, + "sequence": 0, + "vendor": "langchain", + "ingest_source": "Python", + "virtual_llm": True, + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events_list_response) +@validate_custom_event_count(count=7) +@validate_transaction_metrics( + name="test_chain:test_langchain_chain_list_response", + 
scoped_metrics=[("Llm/chain/Langchain/invoke", 1)], + rollup_metrics=[("Llm/chain/Langchain/invoke", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_langchain_chain_list_response(set_trace_info, comma_separated_list_output_parser, chat_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + template = """You are a helpful assistant who generates comma separated lists. + A user will pass in a category, and you should generate 5 objects in that category in a comma separated list. + ONLY return a comma separated list, and nothing more.""" + human_template = "{text}" + + chat_prompt = ChatPromptTemplate.from_messages( + [ + ("system", template), + ("human", human_template), + ] + ) + chain = chat_prompt | chat_openai_client | comma_separated_list_output_parser + chain.invoke( + {"text": "colors"}, + config={"metadata": {"id": "123"}}, + ) + + +@pytest.mark.parametrize( + "create_function,call_function,call_function_args,call_function_kwargs,expected_events", + ( + pytest.param( + create_structured_output_runnable, + "invoke", + ({"input": "Sally is 13"},), + { + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + }, + chat_completion_recorded_events_runnable_invoke, + id="runnable_chain.invoke-with-args-and-kwargs", + ), + pytest.param( + create_structured_output_runnable, + "invoke", + (), + { + "input": {"input": "Sally is 13"}, + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + }, + chat_completion_recorded_events_runnable_invoke, + id="runnable_chain.invoke-with-only-kwargs", + ), + pytest.param( + create_structured_output_runnable, + "invoke", + ({"input": "Sally is 13"},), + {}, + chat_completion_recorded_events_runnable_invoke_no_metadata_or_tags, + id="runnable_chain.invoke-with-only-args", + ), + pytest.param( + create_structured_output_chain, + "invoke", + ({"input": "Sally is 13"},), + { + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke, + id="chain.invoke-with-args-and-kwargs", + ), + pytest.param( + create_structured_output_chain, + "invoke", + (), + { + "input": {"input": "Sally is 13"}, + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke, + id="chain.invoke-with-only-kwargs", + ), + pytest.param( + create_structured_output_chain, + "invoke", + ({"input": "Sally is 13"},), + { + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke_no_metadata_or_tags, + id="chain.invoke-with-only-args", + ), + ), +) +def test_langchain_chain( + set_trace_info, + json_schema, + prompt, + chat_openai_client, + create_function, + call_function, + call_function_args, + call_function_kwargs, + expected_events, +): + @reset_core_stats_engine() + @validate_custom_events(expected_events) + # 3 langchain events and 5 openai events. 
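+    # (LangChain itself contributes 1 LlmChatCompletionSummary plus 2
+    # LlmChatCompletionMessage events, per expected_events above; the underlying
+    # OpenAI instrumentation contributes the other 5, hence count=8 below.)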
+    @validate_custom_event_count(count=8)
+    @validate_transaction_metrics(
+        name="test_chain:test_langchain_chain.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @validate_attributes("agent", ["llm"])
+    @background_task()
+    def _test():
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        runnable = create_function(json_schema, chat_openai_client, prompt)
+
+        output = getattr(runnable, call_function)(*call_function_args, **call_function_kwargs)
+
+        assert output
+
+    _test()
+
+
+@pytest.mark.parametrize(
+    "create_function,call_function,call_function_args,call_function_kwargs,expected_events",
+    (
+        pytest.param(
+            create_structured_output_runnable,
+            "invoke",
+            ({"input": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": ["bar"],
+                    "metadata": {"id": "123"},
+                },
+            },
+            events_sans_content(chat_completion_recorded_events_runnable_invoke),
+            id="runnable_chain.invoke",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "invoke",
+            ({"input": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": ["bar"],
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            events_sans_content(chat_completion_recorded_events_invoke),
+            id="chain.invoke",
+        ),
+    ),
+)
+def test_langchain_chain_no_content(
+    set_trace_info,
+    chat_openai_client,
+    json_schema,
+    prompt,
+    create_function,
+    call_function,
+    call_function_args,
+    call_function_kwargs,
+    expected_events,
+):
+    @reset_core_stats_engine()
+    @disabled_ai_monitoring_record_content_settings
+    @validate_custom_events(expected_events)
+    # 3 langchain events and 5 openai events.
+    @validate_custom_event_count(count=8)
+    @validate_transaction_metrics(
+        name="test_chain:test_langchain_chain_no_content.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @background_task()
+    def _test():
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        runnable = create_function(json_schema, chat_openai_client, prompt)
+
+        output = getattr(runnable, call_function)(*call_function_args, **call_function_kwargs)
+
+        assert output
+
+    _test()
+
+
+@pytest.mark.parametrize(
+    "create_function,call_function,call_function_args,call_function_kwargs,expected_events",
+    (
+        pytest.param(
+            create_structured_output_runnable,
+            "invoke",
+            ({"input": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": [],
+                    "metadata": {"id": "123"},
+                }
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="runnable_chain.invoke-with-args-and-kwargs",
+        ),
+        pytest.param(
+            create_structured_output_runnable,
+            "invoke",
+            (),
+            {
+                "input": {"input": "Sally is 13"},
+                "config": {
+                    "metadata": {"id": "123"},
+                },
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="runnable_chain.invoke-only-kwargs",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "invoke",
+            ({"input": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": [],
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="chain.invoke-with-args-and-kwargs",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "invoke",
+            (),
+            {
+                "input": {"input": "Sally is 13"},
+                "config": {
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="chain.invoke-only-kwargs",
+        ),
+    ),
+)
+def test_langchain_chain_error_in_openai(
+    set_trace_info,
+    chat_openai_client,
+    json_schema,
+    prompt_openai_error,
+    create_function,
+    call_function,
+    call_function_args,
+    call_function_kwargs,
+    expected_events,
+):
+    @reset_core_stats_engine()
+    @validate_transaction_error_event_count(1)
+    @validate_custom_events(expected_events)
+    @validate_custom_event_count(count=6)
+    @validate_transaction_metrics(
+        name="test_chain:test_langchain_chain_error_in_openai.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @background_task()
+    def _test():
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        runnable = create_function(json_schema, chat_openai_client, prompt_openai_error)
+
+        with pytest.raises(openai.AuthenticationError):
+            getattr(runnable, call_function)(*call_function_args, **call_function_kwargs)
+
+    _test()
+
+
+@pytest.mark.parametrize(
+    "create_function,call_function,call_function_args,call_function_kwargs,expected_events,expected_error",
+    (
+        pytest.param(
+            create_structured_output_runnable,
+            "invoke",
+            ({"no-exist": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": [],
"metadata": {"id": "123"}, + } + }, + chat_completion_recorded_events_invoke_langchain_error, + KeyError, + id="runnable_chain.invoke", + ), + pytest.param( + create_structured_output_chain, + "invoke", + ({"no-exist": "Sally is 13"},), + { + "config": { + "tags": [], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke_langchain_error, + ValueError, + id="chain.invoke", + ), + ), +) +def test_langchain_chain_error_in_langchain( + set_trace_info, + chat_openai_client, + json_schema, + prompt, + create_function, + call_function, + call_function_args, + call_function_kwargs, + expected_events, + expected_error, +): + @reset_core_stats_engine() + @validate_transaction_error_event_count(1) + @validate_error_trace_attributes( + callable_name(expected_error), + ) + @validate_custom_events(expected_events) + @validate_custom_event_count(count=2) + @validate_transaction_metrics( + name="test_chain:test_langchain_chain_error_in_langchain.._test", + scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)], + rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, + ) + @background_task() + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + runnable = create_function(json_schema, chat_openai_client, prompt) + + with pytest.raises(expected_error): + getattr(runnable, call_function)(*call_function_args, **call_function_kwargs) + + _test() + + +@pytest.mark.parametrize( + "create_function,call_function,call_function_args,call_function_kwargs,expected_events,expected_error", + ( + pytest.param( + create_structured_output_runnable, + "invoke", + ({"no-exist": "Sally is 13"},), + { + "config": { + "tags": [], + "metadata": {"id": "123"}, + } + }, + events_sans_content(chat_completion_recorded_events_invoke_langchain_error), + KeyError, + id="runnable_chain.invoke", + ), + pytest.param( + create_structured_output_chain, + "invoke", + ({"no-exist": "Sally is 13"},), + { + "config": { + "tags": [], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + events_sans_content(chat_completion_recorded_events_invoke_langchain_error), + ValueError, + id="chain.invoke", + ), + ), +) +def test_langchain_chain_error_in_langchain_no_content( + set_trace_info, + chat_openai_client, + json_schema, + prompt, + create_function, + call_function, + call_function_args, + call_function_kwargs, + expected_events, + expected_error, +): + @reset_core_stats_engine() + @disabled_ai_monitoring_record_content_settings + @validate_transaction_error_event_count(1) + @validate_error_trace_attributes( + callable_name(expected_error), + ) + @validate_custom_events(expected_events) + @validate_custom_event_count(count=2) + @validate_transaction_metrics( + name="test_chain:test_langchain_chain_error_in_langchain_no_content.._test", + scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)], + rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, + ) + @background_task() + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + 
+ runnable = create_function(json_schema, chat_openai_client, prompt) + + with pytest.raises(expected_error): + getattr(runnable, call_function)(*call_function_args, **call_function_kwargs) + + _test() + + +@pytest.mark.parametrize( + "create_function,call_function,input_", + ((create_structured_output_runnable, "invoke", {"input": "Sally is 13"}),), +) +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_langchain_chain_outside_transaction( + set_trace_info, chat_openai_client, json_schema, prompt, create_function, call_function, input_ +): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + runnable = create_function(json_schema, chat_openai_client, prompt) + + output = getattr(runnable, call_function)(input_) + + assert output == {"name": "Sally", "age": 13} + + +@disabled_ai_monitoring_settings +@pytest.mark.parametrize( + "create_function,call_function,input_", + ((create_structured_output_runnable, "invoke", {"input": "Sally is 13"}),), +) +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_langchain_chain_ai_monitoring_disabled( + set_trace_info, chat_openai_client, json_schema, prompt, create_function, call_function, input_ +): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + + runnable = create_function(json_schema, chat_openai_client, prompt) + + output = getattr(runnable, call_function)(input_) + + assert output == {"name": "Sally", "age": 13} + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events_list_response) +@validate_custom_event_count(count=7) +@validate_transaction_metrics( + name="test_chain:test_async_langchain_chain_list_response", + scoped_metrics=[("Llm/chain/Langchain/ainvoke", 1)], + rollup_metrics=[("Llm/chain/Langchain/ainvoke", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_async_langchain_chain_list_response( + set_trace_info, comma_separated_list_output_parser, chat_openai_client, loop +): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + template = """You are a helpful assistant who generates comma separated lists. + A user will pass in a category, and you should generate 5 objects in that category in a comma separated list. 
+ ONLY return a comma separated list, and nothing more.""" + human_template = "{text}" + + chat_prompt = ChatPromptTemplate.from_messages( + [ + ("system", template), + ("human", human_template), + ] + ) + chain = chat_prompt | chat_openai_client | comma_separated_list_output_parser + + loop.run_until_complete( + chain.ainvoke( + {"text": "colors"}, + config={ + "metadata": {"id": "123"}, + }, + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events_list_response)) +@validate_custom_event_count(count=7) +@validate_transaction_metrics( + name="test_chain:test_async_langchain_chain_list_response_no_content", + scoped_metrics=[("Llm/chain/Langchain/ainvoke", 1)], + rollup_metrics=[("Llm/chain/Langchain/ainvoke", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_async_langchain_chain_list_response_no_content( + set_trace_info, comma_separated_list_output_parser, chat_openai_client, loop +): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + template = """You are a helpful assistant who generates comma separated lists. + A user will pass in a category, and you should generate 5 objects in that category in a comma separated list. + ONLY return a comma separated list, and nothing more.""" + human_template = "{text}" + + chat_prompt = ChatPromptTemplate.from_messages( + [ + ("system", template), + ("human", human_template), + ] + ) + chain = chat_prompt | chat_openai_client | comma_separated_list_output_parser + + loop.run_until_complete( + chain.ainvoke( + {"text": "colors"}, + config={ + "metadata": {"id": "123"}, + }, + ) + ) + + +@pytest.mark.parametrize( + "create_function,call_function,call_function_args,call_function_kwargs,expected_events", + ( + pytest.param( + create_structured_output_runnable, + "ainvoke", + ({"input": "Sally is 13"},), + { + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + } + }, + chat_completion_recorded_events_runnable_invoke, + id="runnable_chain.ainvoke-with-args-and-kwargs", + ), + pytest.param( + create_structured_output_runnable, + "ainvoke", + (), + { + "input": {"input": "Sally is 13"}, + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + }, + chat_completion_recorded_events_runnable_invoke, + id="runnable_chain.ainvoke-with-only-kwargs", + ), + pytest.param( + create_structured_output_runnable, + "ainvoke", + ({"input": "Sally is 13"},), + {}, + chat_completion_recorded_events_runnable_invoke_no_metadata_or_tags, + id="runnable_chain.ainvoke-with-only-args", + ), + pytest.param( + create_structured_output_chain, + "ainvoke", + ({"input": "Sally is 13"},), + { + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke, + id="chain.ainvoke-with-args-and-kwargs", + ), + pytest.param( + create_structured_output_chain, + "ainvoke", + (), + { + "input": {"input": "Sally is 13"}, + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke, + id="chain.ainvoke-with-only-kwargs", + ), + pytest.param( + create_structured_output_chain, + "ainvoke", + ({"input": "Sally is 13"},), + { + "return_only_outputs": True, + }, + 
chat_completion_recorded_events_invoke_no_metadata_or_tags,
+            id="chain.ainvoke-with-only-args",
+        ),
+    ),
+)
+def test_async_langchain_chain(
+    set_trace_info,
+    json_schema,
+    prompt,
+    chat_openai_client,
+    create_function,
+    call_function,
+    call_function_args,
+    call_function_kwargs,
+    expected_events,
+    loop,
+):
+    @reset_core_stats_engine()
+    @validate_custom_events(expected_events)
+    # 3 langchain events and 5 openai events.
+    @validate_custom_event_count(count=8)
+    @validate_transaction_metrics(
+        name="test_chain:test_async_langchain_chain.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @validate_attributes("agent", ["llm"])
+    @background_task()
+    def _test():
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        runnable = create_function(json_schema, chat_openai_client, prompt)
+
+        loop.run_until_complete(getattr(runnable, call_function)(*call_function_args, **call_function_kwargs))
+
+    _test()
+
+
+@pytest.mark.parametrize(
+    "create_function,call_function,call_function_args,call_function_kwargs,expected_events",
+    (
+        pytest.param(
+            create_structured_output_runnable,
+            "ainvoke",
+            ({"input": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": [],
+                    "metadata": {"id": "123"},
+                }
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="runnable_chain.ainvoke-with-args-and-kwargs",
+        ),
+        pytest.param(
+            create_structured_output_runnable,
+            "ainvoke",
+            (),
+            {
+                "input": {"input": "Sally is 13"},
+                "config": {
+                    "metadata": {"id": "123"},
+                },
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="runnable_chain.ainvoke-only-kwargs",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "ainvoke",
+            ({"input": "Sally is 13"},),
+            {
+                "config": {
+                    "tags": [],
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="chain.ainvoke-with-args-and-kwargs",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "ainvoke",
+            (),
+            {
+                "input": {"input": "Sally is 13"},
+                "config": {
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            chat_completion_recorded_events_runnable_invoke_openai_error,
+            id="chain.ainvoke-only-kwargs",
+        ),
+    ),
+)
+def test_async_langchain_chain_error_in_openai(
+    set_trace_info,
+    chat_openai_client,
+    json_schema,
+    prompt_openai_error,
+    create_function,
+    call_function,
+    call_function_args,
+    call_function_kwargs,
+    expected_events,
+    loop,
+):
+    @reset_core_stats_engine()
+    @validate_transaction_error_event_count(1)
+    @validate_custom_events(expected_events)
+    @validate_custom_event_count(count=6)
+    @validate_transaction_metrics(
+        name="test_chain:test_async_langchain_chain_error_in_openai.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @background_task()
+    def _test():
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        runnable = create_function(json_schema, chat_openai_client, prompt_openai_error)
+
+        with pytest.raises(openai.AuthenticationError):
+            loop.run_until_complete(getattr(runnable, call_function)(*call_function_args, **call_function_kwargs))
+
+    _test()
+
+
+@pytest.mark.parametrize(
+    "create_function,call_function,call_function_args,call_function_kwargs,expected_events,expected_error",
+    (
+        pytest.param(
+            create_structured_output_runnable,
+            "ainvoke",
+            ({"no-exist": "Sally is 13"},),
+            {
+                "config": {
+                    "metadata": {"id": "123"},
+                }
+            },
+            chat_completion_recorded_events_invoke_langchain_error,
+            KeyError,
+            id="runnable_chain.ainvoke",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "ainvoke",
+            ({"no-exist": "Sally is 13"},),
+            {
+                "config": {
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            chat_completion_recorded_events_invoke_langchain_error,
+            ValueError,
+            id="chain.ainvoke",
+        ),
+    ),
+)
+def test_async_langchain_chain_error_in_langchain(
+    set_trace_info,
+    chat_openai_client,
+    json_schema,
+    prompt,
+    create_function,
+    call_function,
+    call_function_args,
+    call_function_kwargs,
+    expected_events,
+    expected_error,
+    loop,
+):
+    @reset_core_stats_engine()
+    @validate_transaction_error_event_count(1)
+    @validate_error_trace_attributes(
+        callable_name(expected_error),
+    )
+    @validate_custom_events(expected_events)
+    @validate_custom_event_count(count=2)
+    @validate_transaction_metrics(
+        name="test_chain:test_async_langchain_chain_error_in_langchain.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @background_task()
+    def _test():
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        add_custom_attribute("llm.foo", "bar")
+        add_custom_attribute("non_llm_attr", "python-agent")
+
+        runnable = create_function(json_schema, chat_openai_client, prompt)
+
+        with pytest.raises(expected_error):
+            loop.run_until_complete(getattr(runnable, call_function)(*call_function_args, **call_function_kwargs))
+
+    _test()
+
+
+@pytest.mark.parametrize(
+    "create_function,call_function,call_function_args,call_function_kwargs,expected_events,expected_error",
+    (
+        pytest.param(
+            create_structured_output_runnable,
+            "ainvoke",
+            ({"no-exist": "Sally is 13"},),
+            {
+                "config": {
+                    "metadata": {"id": "123"},
+                }
+            },
+            events_sans_content(chat_completion_recorded_events_invoke_langchain_error),
+            KeyError,
+            id="runnable_chain.ainvoke",
+        ),
+        pytest.param(
+            create_structured_output_chain,
+            "ainvoke",
+            ({"no-exist": "Sally is 13"},),
+            {
+                "config": {
+                    "metadata": {"id": "123"},
+                },
+                "return_only_outputs": True,
+            },
+            events_sans_content(chat_completion_recorded_events_invoke_langchain_error),
+            ValueError,
+            id="chain.ainvoke",
+        ),
+    ),
+)
+def test_async_langchain_chain_error_in_langchain_no_content(
+    set_trace_info,
+    chat_openai_client,
+    json_schema,
+    prompt,
+    create_function,
+    call_function,
+    call_function_args,
+    call_function_kwargs,
+    expected_events,
+    expected_error,
+    loop,
+):
+    @reset_core_stats_engine()
+    @disabled_ai_monitoring_record_content_settings
+    @validate_transaction_error_event_count(1)
+    @validate_error_trace_attributes(
+        callable_name(expected_error),
+    )
+    @validate_custom_events(expected_events)
+    @validate_custom_event_count(count=2)
+    @validate_transaction_metrics(
name="test_chain:test_async_langchain_chain_error_in_langchain_no_content.._test", + scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)], + rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, + ) + @background_task() + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + runnable = create_function(json_schema, chat_openai_client, prompt) + + with pytest.raises(expected_error): + loop.run_until_complete(getattr(runnable, call_function)(*call_function_args, **call_function_kwargs)) + + _test() + + +@pytest.mark.parametrize( + "create_function,call_function,input_", + ( + (create_structured_output_runnable, "ainvoke", {"input": "Sally is 13"}), + (create_structured_output_chain, "arun", "Sally is 13"), + ), +) +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_async_langchain_chain_outside_transaction( + set_trace_info, chat_openai_client, json_schema, prompt, create_function, call_function, input_, loop +): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + runnable = create_function(json_schema, chat_openai_client, prompt) + + loop.run_until_complete(getattr(runnable, call_function)(input_)) + + +@pytest.mark.parametrize( + "create_function,call_function,call_function_args,call_function_kwargs,expected_events", + ( + pytest.param( + create_structured_output_runnable, + "ainvoke", + ({"input": "Sally is 13"},), + { + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + } + }, + chat_completion_recorded_events_runnable_invoke, + id="runnable_chain.ainvoke-with-args-and-kwargs", + ), + pytest.param( + create_structured_output_chain, + "ainvoke", + ({"input": "Sally is 13"},), + { + "config": { + "tags": ["bar"], + "metadata": {"id": "123"}, + }, + "return_only_outputs": True, + }, + chat_completion_recorded_events_invoke, + id="chain.ainvoke-with-args-and-kwargs", + ), + ), +) +def test_multiple_async_langchain_chain( + set_trace_info, + json_schema, + prompt, + chat_openai_client, + create_function, + call_function, + call_function_args, + call_function_kwargs, + expected_events, + loop, +): + call1 = expected_events.copy() + call1[0][1]["request_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" + call1[1][1]["request_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" + call1[2][1]["request_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" + call2 = expected_events.copy() + call2[0][1]["request_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" + call2[1][1]["request_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" + call2[2][1]["request_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" + + @reset_core_stats_engine() + @validate_custom_events(call1 + call2) + # 3 langchain events and 5 openai events. 
+    @validate_custom_event_count(count=16)
+    @validate_transaction_metrics(
+        name="test_chain:test_multiple_async_langchain_chain.<locals>._test",
+        scoped_metrics=[("Llm/chain/Langchain/%s" % call_function, 2)],
+        rollup_metrics=[("Llm/chain/Langchain/%s" % call_function, 2)],
+        custom_metrics=[
+            ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1),
+        ],
+        background_task=True,
+    )
+    @background_task()
+    def _test():
+        with patch("langchain_core.callbacks.manager.uuid", autospec=True) as mock_uuid:
+            mock_uuid.uuid4.side_effect = [
+                uuid.UUID("b1883d9d-10d6-4b67-a911-f72849704e92"),  # first call
+                uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b61"),
+                uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b61"),  # second call
+                uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b63"),
+                uuid.UUID("b1883d9d-10d6-4b67-a911-f72849704e93"),
+                uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b64"),
+                uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b65"),
+                uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b66"),
+            ]
+            set_trace_info()
+            add_custom_attribute("llm.conversation_id", "my-awesome-id")
+            add_custom_attribute("llm.foo", "bar")
+            add_custom_attribute("non_llm_attr", "python-agent")
+
+            runnable = create_function(json_schema, chat_openai_client, prompt)
+
+            # Named future1/future2 to avoid shadowing the expected-event lists above.
+            future1 = asyncio.ensure_future(
+                getattr(runnable, call_function)(*call_function_args, **call_function_kwargs), loop=loop
+            )
+            future2 = asyncio.ensure_future(
+                getattr(runnable, call_function)(*call_function_args, **call_function_kwargs), loop=loop
+            )
+            loop.run_until_complete(asyncio.gather(future1, future2))
+
+    _test()
+
+
+@pytest.fixture
+def json_schema():
+    return {
+        "title": "Person",
+        "description": "Identifying information about a person.",
+        "type": "object",
+        "properties": {
+            "name": {"title": "Name", "description": "The person's name", "type": "string"},
+            "age": {"title": "Age", "description": "The person's age", "type": "integer"},
+            "fav_food": {
+                "title": "Fav Food",
+                "description": "The person's favorite food",
+                "type": "string",
+            },
+        },
+        "required": ["name", "age"],
+    }
+
+
+@pytest.fixture
+def prompt():
+    return ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                "You are a world class algorithm for extracting information in structured formats.",
+            ),
+            (
+                "human",
+                "Use the given format to extract information from the following input: {input}",
+            ),
+            ("human", "Tip: Make sure to answer in the correct format"),
+        ]
+    )
+
+
+@pytest.fixture
+def prompt_openai_error():
+    return ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                "You are a world class algorithm for extracting information in structured formats with openai failures.",
+            ),
+            (
+                "human",
+                "Use the given format to extract information from the following input: {input}",
+            ),
+            ("human", "Tip: Make sure to answer in the correct format"),
+        ]
+    )
+
+
+@pytest.fixture
+def comma_separated_list_output_parser():
+    class _CommaSeparatedListOutputParser(BaseOutputParser):
+        """Parse the output of an LLM call to a comma-separated list."""
+
+        def parse(self, text):
+            """Parse the output of an LLM call."""
+            return text.strip().split(", ")
+
+    return _CommaSeparatedListOutputParser()
diff --git a/tests/mlmodel_langchain/test_tool.py b/tests/mlmodel_langchain/test_tool.py
new file mode 100644
index 000000000..84ba7cf36
--- /dev/null
+++ b/tests/mlmodel_langchain/test_tool.py
@@ -0,0 +1,490 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import copy +import uuid + +import langchain +import pydantic +import pytest +from langchain.tools import tool +from mock import patch +from testing_support.fixtures import ( + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_transaction_error_event_count import ( + validate_transaction_error_event_count, +) +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.object_names import callable_name + + +@pytest.fixture +def single_arg_tool(): + @tool + def _single_arg_tool(query: str): + """A test tool that returns query string""" + return query + + return _single_arg_tool + + +@pytest.fixture +def multi_arg_tool(): + @tool + def _multi_arg_tool(first_num: int, second_num: int): + """A test tool that adds two integers together""" + return first_num + second_num + + return _multi_arg_tool + + +def events_sans_content(event): + new_event = copy.deepcopy(event) + for _event in new_event: + del _event[1]["input"] + if "output" in _event[1]: + del _event[1]["output"] + return new_event + + +single_arg_tool_recorded_events = [ + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": None, + "output": "Python Agent", + "name": "_single_arg_tool", + "description": "_single_arg_tool(query: str) - A test tool that returns query string", + "span_id": None, + "trace_id": "trace-id", + "input": "{'query': 'Python Agent'}", + "vendor": "langchain", + "ingest_source": "Python", + "duration": None, + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(single_arg_tool_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_single_arg_tool", + scoped_metrics=[("Llm/tool/Langchain/run", 1)], + rollup_metrics=[("Llm/tool/Langchain/run", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_langchain_single_arg_tool(set_trace_info, single_arg_tool): + set_trace_info() + single_arg_tool.run({"query": "Python Agent"}) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(single_arg_tool_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_single_arg_tool_no_content", + scoped_metrics=[("Llm/tool/Langchain/run", 1)], + rollup_metrics=[("Llm/tool/Langchain/run", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % 
langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_langchain_single_arg_tool_no_content(set_trace_info, single_arg_tool): + set_trace_info() + single_arg_tool.run({"query": "Python Agent"}) + + +@reset_core_stats_engine() +@validate_custom_events(single_arg_tool_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_single_arg_tool_async", + scoped_metrics=[("Llm/tool/Langchain/arun", 1)], + rollup_metrics=[("Llm/tool/Langchain/arun", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_langchain_single_arg_tool_async(set_trace_info, single_arg_tool, loop): + set_trace_info() + loop.run_until_complete(single_arg_tool.arun({"query": "Python Agent"})) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(single_arg_tool_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_single_arg_tool_async_no_content", + scoped_metrics=[("Llm/tool/Langchain/arun", 1)], + rollup_metrics=[("Llm/tool/Langchain/arun", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_langchain_single_arg_tool_async_no_content(set_trace_info, single_arg_tool, loop): + set_trace_info() + loop.run_until_complete(single_arg_tool.arun({"query": "Python Agent"})) + + +multi_arg_tool_recorded_events = [ + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": None, + "output": "81", + "name": "_multi_arg_tool", + "description": "_multi_arg_tool(first_num: int, second_num: int) - A test tool that adds two integers together", + "span_id": None, + "trace_id": "trace-id", + "input": "{'first_num': 53, 'second_num': 28}", + "vendor": "langchain", + "ingest_source": "Python", + "duration": None, + "tags": "['python', 'test_tags']", + "metadata.test": "langchain", + "metadata.test_run": True, + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(multi_arg_tool_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_multi_arg_tool", + scoped_metrics=[("Llm/tool/Langchain/run", 1)], + rollup_metrics=[("Llm/tool/Langchain/run", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_langchain_multi_arg_tool(set_trace_info, multi_arg_tool): + set_trace_info() + multi_arg_tool.metadata = {"test_run": True} + multi_arg_tool.tags = ["test_tags"] + multi_arg_tool.run( + {"first_num": 53, "second_num": 28}, + tags=["python"], + metadata={"test": "langchain"}, + ) + + +@reset_core_stats_engine() +@validate_custom_events(multi_arg_tool_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_multi_arg_tool_async", + scoped_metrics=[("Llm/tool/Langchain/arun", 1)], + rollup_metrics=[("Llm/tool/Langchain/arun", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def 
test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop): + set_trace_info() + multi_arg_tool.metadata = {"test_run": True} + multi_arg_tool.tags = ["test_tags"] + loop.run_until_complete( + multi_arg_tool.arun( + {"first_num": 53, "second_num": 28}, + tags=["python"], + metadata={"test": "langchain"}, + ) + ) + + +multi_arg_error_recorded_events = [ + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": None, # No run ID created on error + "name": "_multi_arg_tool", + "description": "_multi_arg_tool(first_num: int, second_num: int) - A test tool that adds two integers together", + "span_id": None, + "trace_id": "trace-id", + "input": "{'first_num': 53}", + "vendor": "langchain", + "ingest_source": "Python", + "duration": None, + "tags": "['test_tags', 'python']", + "metadata.test": "langchain", + "metadata.test_run": True, + "error": True, + }, + ), +] + + +@reset_core_stats_engine() +@validate_transaction_error_event_count(1) +@validate_error_trace_attributes( + callable_name(pydantic.v1.error_wrappers.ValidationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_custom_events(multi_arg_error_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_error_in_run", + scoped_metrics=[("Llm/tool/Langchain/run", 1)], + rollup_metrics=[("Llm/tool/Langchain/run", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_langchain_error_in_run(set_trace_info, multi_arg_tool): + with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + set_trace_info() + # Only one argument is provided while the tool expects two to create an error + multi_arg_tool.run( + {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_transaction_error_event_count(1) +@validate_error_trace_attributes( + callable_name(pydantic.v1.error_wrappers.ValidationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_custom_events(events_sans_content(multi_arg_error_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_error_in_run_no_content", + scoped_metrics=[("Llm/tool/Langchain/run", 1)], + rollup_metrics=[("Llm/tool/Langchain/run", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool): + with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + set_trace_info() + # Only one argument is provided while the tool expects two to create an error + multi_arg_tool.run( + {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + + +@reset_core_stats_engine() +@validate_transaction_error_event_count(1) +@validate_error_trace_attributes( + callable_name(pydantic.v1.error_wrappers.ValidationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_custom_events(multi_arg_error_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_error_in_run_async", + scoped_metrics=[("Llm/tool/Langchain/arun", 1)], + 
rollup_metrics=[("Llm/tool/Langchain/arun", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): + with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + set_trace_info() + # Only one argument is provided while the tool expects two to create an error + loop.run_until_complete( + multi_arg_tool.arun( + {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_transaction_error_event_count(1) +@validate_error_trace_attributes( + callable_name(pydantic.v1.error_wrappers.ValidationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_custom_events(events_sans_content(multi_arg_error_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_tool:test_langchain_error_in_run_async_no_content", + scoped_metrics=[("Llm/tool/Langchain/arun", 1)], + rollup_metrics=[("Llm/tool/Langchain/arun", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_langchain_error_in_run_async_no_content(set_trace_info, multi_arg_tool, loop): + with pytest.raises(pydantic.v1.error_wrappers.ValidationError): + set_trace_info() + # Only one argument is provided while the tool expects two to create an error + loop.run_until_complete( + multi_arg_tool.arun( + {"first_num": 53}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_langchain_tool_outside_txn(single_arg_tool): + single_arg_tool.run( + {"query": "Python Agent"}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_langchain_tool_outside_txn_async(single_arg_tool, loop): + loop.run_until_complete( + single_arg_tool.arun( + {"query": "Python Agent"}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_langchain_tool_disabled_ai_monitoring_events_sync(set_trace_info, single_arg_tool): + set_trace_info() + single_arg_tool.run( + {"query": "Python Agent"}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_langchain_tool_disabled_ai_monitoring_events_async(set_trace_info, single_arg_tool, loop): + set_trace_info() + loop.run_until_complete( + single_arg_tool.arun( + {"query": "Python Agent"}, tags=["test_tags", "python"], metadata={"test_run": True, "test": "langchain"} + ) + ) + + +def test_langchain_multiple_async_calls(set_trace_info, single_arg_tool, multi_arg_tool, loop): + call1 = single_arg_tool_recorded_events.copy() + call1[0][1]["run_id"] = "b1883d9d-10d6-4b67-a911-f72849704e92" + call2 = multi_arg_tool_recorded_events.copy() + call2[0][1]["run_id"] = "a58aa0c0-c854-4657-9e7b-4cce442f3b61" + expected_events = call1 + call2 + + @reset_core_stats_engine() + @validate_custom_events(expected_events) + 
@validate_custom_event_count(count=2) + @validate_transaction_metrics( + name="test_tool:test_langchain_multiple_async_calls.._test", + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, + ) + @background_task() + def _test(): + set_trace_info() + + with patch("langchain_core.callbacks.manager.uuid", autospec=True) as mock_uuid: + mock_uuid.uuid4.side_effect = [ + uuid.UUID("b1883d9d-10d6-4b67-a911-f72849704e92"), # first call + uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b61"), + uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b61"), # second call + uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b63"), + uuid.UUID("b1883d9d-10d6-4b67-a911-f72849704e93"), + uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b64"), + uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b65"), + uuid.UUID("a58aa0c0-c854-4657-9e7b-4cce442f3b66"), + ] + + loop.run_until_complete( + asyncio.gather( + single_arg_tool.arun( + {"query": "Python Agent"}, + ), + multi_arg_tool.arun( + {"first_num": 53, "second_num": 28}, + tags=["python", "test_tags"], + metadata={"test": "langchain", "test_run": True}, + ), + ) + ) + + _test() diff --git a/tests/mlmodel_langchain/test_vectorstore.py b/tests/mlmodel_langchain/test_vectorstore.py new file mode 100644 index 000000000..e090b1892 --- /dev/null +++ b/tests/mlmodel_langchain/test_vectorstore.py @@ -0,0 +1,461 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
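For orientation before the vectorstore tests below, here is a minimal sketch of the FAISS flow they exercise. It is illustrative only and not part of the diff: it assumes the faiss package is installed, and the _FakeEmbeddings class is a hypothetical stand-in for the embedding_openai_client fixture the real tests use.

from langchain_community.vectorstores.faiss import FAISS
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings


class _FakeEmbeddings(Embeddings):
    # Hypothetical stand-in for the embedding_openai_client fixture:
    # deterministic fixed-size vectors, so no API key or HTTP mock is needed.
    def embed_documents(self, texts):
        return [[float(len(text)), 1.0, 0.0, 0.0] for text in texts]

    def embed_query(self, text):
        return self.embed_documents([text])[0]


docs = [Document(page_content="Hello world!", metadata={"page": 0})]
index = FAISS.from_documents(docs, _FakeEmbeddings())  # requires the faiss package

# Under the agent's langchain instrumentation, this call is what produces one
# LlmVectorSearch event plus one LlmVectorSearchResult event per returned document.
results = index.similarity_search("Complete this sentence: Hello", k=1)
assert "Hello" in results[0].page_content

With a single indexed document, any query returns that document at k=1, which is why the tests can assert deterministically on the page content.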
+ +import copy +import os + +import langchain +import pytest +from langchain_community.document_loaders import PyPDFLoader +from langchain_community.vectorstores.faiss import FAISS +from testing_support.fixtures import ( + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + + +def vectorstore_events_sans_content(event): + new_event = copy.deepcopy(event) + for _event in new_event: + if "request.query" in _event[1]: + del _event[1]["request.query"] + if "page_content" in _event[1]: + del _event[1]["page_content"] + return new_event + + +vectorstore_recorded_events = [ + ( + {"type": "LlmVectorSearch", "timestamp": 1702052394890}, + { + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "id": None, # UUID that changes with each run + "vendor": "langchain", + "ingest_source": "Python", + "request.query": "Complete this sentence: Hello", + "request.k": 1, + "duration": None, # Changes with every run + "response.number_of_documents": 1, + }, + ), + ( + {"type": "LlmVectorSearchResult", "timestamp": 1702052424031}, + { + "search_id": None, # UUID that changes with each run + "sequence": 0, + "page_content": "Hello world!\n1", + "span_id": None, + "trace_id": "trace-id", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "id": None, # UUID that changes with each run + "vendor": "langchain", + "ingest_source": "Python", + "metadata.source": os.path.join(os.path.dirname(__file__), "hello.pdf"), + "metadata.page": 0, + }, + ), +] + + +_test_vectorstore_modules_instrumented_ignored_classes = set( + [ + "VectorStore", # Base class + "ElasticKnnSearch", # Deprecated, so we will not be instrumenting this. + ] +) + + +# Test to check if all classes containing "similarity_search" +# method are instrumented. Prints out anything that is not +# instrumented to identify when new vectorstores are added. +def test_vectorstore_modules_instrumented(): + from langchain_community import vectorstores + + vector_store_classes = tuple(vectorstores.__all__) + uninstrumented_sync_classes = [] + uninstrumented_async_classes = [] + for class_name in vector_store_classes: + class_ = getattr(vectorstores, class_name) + if ( + not hasattr(class_, "similarity_search") + or class_name in _test_vectorstore_modules_instrumented_ignored_classes + ): + # If "similarity_search" is found, "asimilarity_search" will + # also be found, so separate logic is not necessary to check this. 
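+ # Note: instrumented methods are wrapped in the agent's wrapt-style
+ # FunctionWrapper, which exposes the original callable via "__wrapped__";
+ # that attribute is what the hasattr checks below rely on, e.g.
+ # hasattr(FAISS.similarity_search, "__wrapped__") is True once the
+ # langchain vectorstore hooks have been applied.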
+ continue + + if not hasattr(getattr(class_, "similarity_search"), "__wrapped__"): + uninstrumented_sync_classes.append(class_name) + if not hasattr(getattr(class_, "asimilarity_search"), "__wrapped__"): + uninstrumented_async_classes.append(class_name) + + assert not uninstrumented_sync_classes, "Uninstrumented sync classes found: %s" % str(uninstrumented_sync_classes) + assert not uninstrumented_async_classes, "Uninstrumented async classes found: %s" % str( + uninstrumented_async_classes + ) + + +@reset_core_stats_engine() +@validate_custom_events(vectorstore_recorded_events) +# Two OpenAI LlmEmbedded, two LangChain LlmVectorSearch +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_vectorstore:test_pdf_pagesplitter_vectorstore_in_txn", + scoped_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_pdf_pagesplitter_vectorstore_in_txn(set_trace_info, embedding_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + docs = faiss_index.similarity_search("Complete this sentence: Hello", k=1) + assert "Hello world" in docs[0].page_content + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(vectorstore_events_sans_content(vectorstore_recorded_events)) +# Two OpenAI LlmEmbedded, two LangChain LlmVectorSearch +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_vectorstore:test_pdf_pagesplitter_vectorstore_in_txn_no_content", + scoped_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_pdf_pagesplitter_vectorstore_in_txn_no_content(set_trace_info, embedding_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + docs = faiss_index.similarity_search("Complete this sentence: Hello", k=1) + assert "Hello world" in docs[0].page_content + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_pdf_pagesplitter_vectorstore_outside_txn(set_trace_info, embedding_openai_client): + set_trace_info() + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + docs = faiss_index.similarity_search("Complete this sentence: Hello", k=1) + assert "Hello world" in docs[0].page_content + + +@disabled_ai_monitoring_settings 
+@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_pdf_pagesplitter_vectorstore_ai_monitoring_disabled(set_trace_info, embedding_openai_client): + set_trace_info() + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + docs = faiss_index.similarity_search("Complete this sentence: Hello", k=1) + assert "Hello world" in docs[0].page_content + + +@reset_core_stats_engine() +@validate_custom_events(vectorstore_recorded_events) +# Two OpenAI LlmEmbedded, two LangChain LlmVectorSearch +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_vectorstore:test_async_pdf_pagesplitter_vectorstore_in_txn", + scoped_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_async_pdf_pagesplitter_vectorstore_in_txn(loop, set_trace_info, embedding_openai_client): + async def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search("Complete this sentence: Hello", k=1) + return docs + + docs = loop.run_until_complete(_test()) + assert "Hello world" in docs[0].page_content + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(vectorstore_events_sans_content(vectorstore_recorded_events)) +# Two OpenAI LlmEmbedded, two LangChain LlmVectorSearch +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_vectorstore:test_async_pdf_pagesplitter_vectorstore_in_txn_no_content", + scoped_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_async_pdf_pagesplitter_vectorstore_in_txn_no_content(loop, set_trace_info, embedding_openai_client): + async def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search("Complete this sentence: Hello", k=1) + return docs + + docs = loop.run_until_complete(_test()) + assert "Hello world" in docs[0].page_content + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_async_pdf_pagesplitter_vectorstore_outside_txn(loop, set_trace_info, embedding_openai_client): + async def _test(): + set_trace_info() + + script_dir = os.path.dirname(__file__) + loader = 
PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search("Complete this sentence: Hello", k=1) + return docs + + docs = loop.run_until_complete(_test()) + assert "Hello world" in docs[0].page_content + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_async_pdf_pagesplitter_vectorstore_ai_monitoring_disabled(loop, set_trace_info, embedding_openai_client): + async def _test(): + set_trace_info() + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search("Complete this sentence: Hello", k=1) + return docs + + docs = loop.run_until_complete(_test()) + assert "Hello world" in docs[0].page_content + + +vectorstore_error_events = [ + ( + {"type": "LlmVectorSearch"}, + { + "id": None, # UUID that varies with each run + "request.query": "Complete this sentence: Hello", + "request.k": -1, + "span_id": None, + "trace_id": "trace-id", + "vendor": "langchain", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(AssertionError), + required_params={"user": ["vector_store_id"], "intrinsic": [], "agent": []}, +) +@validate_custom_events(vectorstore_error_events) +@validate_transaction_metrics( + name="test_vectorstore:test_vectorstore_error", + scoped_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_vectorstore_error(set_trace_info, embedding_openai_client, loop): + with pytest.raises(AssertionError): + set_trace_info() + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + faiss_index.similarity_search(query="Complete this sentence: Hello", k=-1) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(AssertionError), + required_params={"user": ["vector_store_id"], "intrinsic": [], "agent": []}, +) +@validate_custom_events(vectorstore_events_sans_content(vectorstore_error_events)) +@validate_transaction_metrics( + name="test_vectorstore:test_vectorstore_error_no_content", + scoped_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/similarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_vectorstore_error_no_content(set_trace_info, embedding_openai_client): + with pytest.raises(AssertionError): + set_trace_info() + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = FAISS.from_documents(docs, embedding_openai_client) + faiss_index.similarity_search(query="Complete this sentence: Hello", k=-1) + + +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(AssertionError), + required_params={"user": 
["vector_store_id"], "intrinsic": [], "agent": []}, +) +@validate_custom_events(vectorstore_error_events) +@validate_transaction_metrics( + name="test_vectorstore:test_async_vectorstore_error", + scoped_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_async_vectorstore_error(loop, set_trace_info, embedding_openai_client): + async def _test(): + set_trace_info() + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search(query="Complete this sentence: Hello", k=-1) + return docs + + with pytest.raises(AssertionError): + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(AssertionError), + required_params={"user": ["vector_store_id"], "intrinsic": [], "agent": []}, +) +@validate_custom_events(vectorstore_events_sans_content(vectorstore_error_events)) +@validate_transaction_metrics( + name="test_vectorstore:test_async_vectorstore_error_no_content", + scoped_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + rollup_metrics=[("Llm/vectorstore/Langchain/asimilarity_search", 1)], + custom_metrics=[ + ("Supportability/Python/ML/Langchain/%s" % langchain.__version__, 1), + ], + background_task=True, +) +@background_task() +def test_async_vectorstore_error_no_content(loop, set_trace_info, embedding_openai_client): + async def _test(): + set_trace_info() + + script_dir = os.path.dirname(__file__) + loader = PyPDFLoader(os.path.join(script_dir, "hello.pdf")) + docs = loader.load() + + faiss_index = await FAISS.afrom_documents(docs, embedding_openai_client) + docs = await faiss_index.asimilarity_search(query="Complete this sentence: Hello", k=-1) + return docs + + with pytest.raises(AssertionError): + loop.run_until_complete(_test()) diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py new file mode 100644 index 000000000..c8b844cf3 --- /dev/null +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -0,0 +1,949 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import pytest +from testing_support.mock_external_http_server import MockExternalHTTPServer + +from newrelic.common.package_version_utils import get_package_version_tuple + +# This defines an external server test apps can make requests to instead of +# the real OpenAI backend. This provides 3 features: +# +# 1) This removes dependencies on external websites. 
+# 2) Provides a better mechanism for making an external call in a test app than +# simple calling another endpoint the test app makes available because this +# server will not be instrumented meaning we don't have to sort through +# transactions to separate the ones created in the test app and the ones +# created by an external call. +# 3) This app runs on a separate thread meaning it won't block the test app. + +STREAMED_RESPONSES = { + "Stream parsing error.": [ + { + "Content-Type": "text/event-stream", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "516", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "199", + "x-ratelimit-remaining-tokens": "39940", + "x-ratelimit-reset-requests": "7m12s", + "x-ratelimit-reset-tokens": "90ms", + "x-request-id": "49dbbffbd3c3f4612aa48def69059ccd", + }, + 200, + [ + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [ + {"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": None, "finish_reason": None} + ], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": None, "finish_reason": None}], + }, + ], + ], + "Invalid API key.": [ + {"Content-Type": "application/json; charset=utf-8", "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6"}, + 401, + { + "error": { + "message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": None, + "code": "invalid_api_key", + } + }, + ], + "Model does not exist.": [ + { + "Content-Type": "application/json; charset=utf-8", + "x-request-id": "cfdf51fb795362ae578c12a21796262c", + }, + 404, + { + "error": { + "message": "The model `does-not-exist` does not exist", + "type": "invalid_request_error", + "param": None, + "code": "model_not_found", + } + }, + ], + "You are a scientist.": [ + { + "Content-Type": "text/event-stream", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "516", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "199", + "x-ratelimit-remaining-tokens": "39940", + "x-ratelimit-reset-requests": "7m12s", + "x-ratelimit-reset-tokens": "90ms", + "x-request-id": "49dbbffbd3c3f4612aa48def69059ccd", + }, + 200, + [ + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [ + {"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": None, "finish_reason": None} + ], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " Fahrenheit"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " is"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " equal"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " to"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " "}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": 
{"content": "100"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " Celsius"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "."}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {}, "logprobs": None, "finish_reason": "stop"}], + }, + ], + ], +} + +RESPONSES_V1 = { + "You are a scientist.": [ + { + "Content-Type": "text/event-stream", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "foobar-jtbczk", + "openai-processing-ms": "516", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "196", + "x-ratelimit-remaining-tokens": "39880", + "x-ratelimit-reset-requests": "23m5.129s", + "x-ratelimit-reset-tokens": "180ms", + "x-request-id": "5c53c9b80af57a1c9b38568f01dcde7f", + }, + 200, + [ + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [ + {"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": None, "finish_reason": None} + ], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " Fahrenheit"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " is"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " equal"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": 
"chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " to"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " "}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "100"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " Celsius"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "."}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "object": "chat.completion.chunk", + "created": 1706565311, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {}, "logprobs": None, "finish_reason": "stop"}], + }, + ], + ] +} +RESPONSES_V1 = { + "You are a scientist.": [ + { + "content-type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "6326", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-limit-tokens_usage_based": "40000", + "x-ratelimit-remaining-requests": "198", + "x-ratelimit-remaining-tokens": "39880", + "x-ratelimit-remaining-tokens_usage_based": "39880", + "x-ratelimit-reset-requests": "11m32.334s", + "x-ratelimit-reset-tokens": "180ms", + "x-ratelimit-reset-tokens_usage_based": "180ms", + "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410ac", + }, + 200, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion", + "created": 1701995833, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + }, + "finish_reason": "stop", + } + ], + "usage": {"prompt_tokens": 26, "completion_tokens": 82, "total_tokens": 108}, + "system_fingerprint": None, + }, + ], + "You are a mathematician.": [ + { + "content-type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "6326", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-limit-tokens_usage_based": "40000", + "x-ratelimit-remaining-requests": "198", + 
"x-ratelimit-remaining-tokens": "39880", + "x-ratelimit-remaining-tokens_usage_based": "39880", + "x-ratelimit-reset-requests": "11m32.334s", + "x-ratelimit-reset-tokens": "180ms", + "x-ratelimit-reset-tokens_usage_based": "180ms", + "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410cd", + }, + 200, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat", + "object": "chat.completion", + "created": 1701995833, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1 plus 2 is 3.", + }, + "finish_reason": "stop", + } + ], + "usage": {"prompt_tokens": 26, "completion_tokens": 82, "total_tokens": 108}, + "system_fingerprint": None, + }, + ], + "Invalid API key.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "a51821b9fd83d8e0e04542bedc174310"}, + 401, + { + "error": { + "message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": None, + "code": "invalid_api_key", + } + }, + ], + "Model does not exist.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "3b0f8e510ee8a67c08a227a98eadbbe6"}, + 404, + { + "error": { + "message": "The model `does-not-exist` does not exist", + "type": "invalid_request_error", + "param": None, + "code": "model_not_found", + } + }, + ], + "No usage data": [ + { + "content-type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "6326", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-limit-tokens_usage_based": "40000", + "x-ratelimit-remaining-requests": "198", + "x-ratelimit-remaining-tokens": "39880", + "x-ratelimit-remaining-tokens_usage_based": "39880", + "x-ratelimit-reset-requests": "11m32.334s", + "x-ratelimit-reset-tokens": "180ms", + "x-ratelimit-reset-tokens_usage_based": "180ms", + "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410ac", + }, + 200, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion", + "created": 1701995833, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + }, + "finish_reason": "stop", + } + ], + "usage": None, + "system_fingerprint": None, + }, + ], + "This is an embedding test.": [ + { + "content-type": "application/json", + "openai-organization": "foobar-jtbczk", + "openai-processing-ms": "21", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "150000", + "x-ratelimit-remaining-requests": "197", + "x-ratelimit-remaining-tokens": "149993", + "x-ratelimit-reset-requests": "19m5.228s", + "x-ratelimit-reset-tokens": "2ms", + "x-request-id": "fef7adee5adcfb03c083961bdce4f6a4", + }, + 200, + { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": 
"SLewvFF6iztXKj07UOCQO41IorspWOk79KHuu12FrbwjqLe8FCTnvBKqj7sz6bM8qqUEvFSfITpPrJu7uOSbPM8agzyYYqM7YJl/PBF2mryNN967uRiRO9lGcbszcuq7RZIavAnnNLwWA5s8mnb1vG+UGTyqpYS846PGO2M1X7wIxAO8HfgFvc8s8LuQXPQ5qgsKPOinEL15ndY8/MrOu1LRMTxCbQS7PEYJOyMx7rwDJj+79dVjO5P4UzmoPZq8jUgivL36UjzA/Lc8Jt6Ru4bKAL1jRiM70i5VO4neUjwneAy7mlNEPBVpoDuayo28TO2KvAmBrzzwvyy8B3/KO0ZgCry3sKa6QTmPO0a1Szz46Iw87AAcPF0O5DyJVZw8Ac+Yu1y3Pbqzesw8DUDAuq8hQbyALLy7TngmPL6lETxXxLc6TzXSvKJrYLy309c8OHa0OU3NZ7vru2K8mIXUPCxrErxLU5C5s/EVPI+wjLp7BcE74TvcO+2aFrx4A9w80j+Zu/aAojwmzU08k/hTvBpL4rvHFFQ76YftutrxL7wyxgK9BsIevLkYkTq4B028OZnlPPkcgjxhzfS79oCiuB34BbwITTq97nrzOugwRzwGS1U7CqTgvFxROLx4aWG7E/DxPA3J9jwd+AU8dVWPvGlc2jzwWae57nrzu569E72GU7e8Vn9+vFLA7TtVbZE8eOCqPG+3Sjxr5/W8s+DRPE+sm7wFKKQ8A8A5vUSBVryeIxk8hsqAPAeQjryeIxm8gU/tuxVpoDxVXM250GDlOlEDwjs0t6O8Tt6rOVrGHLvmyFy6dhI7PLPxlbv3YP88B/YTPEZgCrxqKsq8Xh+ou96wQLp5rpo8LSg+vL63/rsFjqk8E/DxPEi3MDzTcw66PjcqPNgSfLwqnaK85QuxPI7iHL2+pRE8Z+ICOxzEELvph+07jHqyu2ltnrwNQMC82BL8vAOdiDwSqo88CLM/PCKFBrzmP6a85Nc7PBaM0bvh1VY7NB2pvMkF9Tx3New87mgGPAoKZjo+nS+/Rk/GucqwMz3fwYS8yrCzPMo56jyDHV08XLe9vB4+aLwXwMY8dVUPvCFATbx2eMC8V7NzvEnrpTsIxIO7yVmNu2lc2ryGQnM8A6/1PH/VFbySO6g80i5VPOY/prv6cyi7W5QMPJVP+jsyLIi84H6wPKM50DrZNIS8UEaWPPrIaTzvrmg8rcoaPRuQm7ysH9y8OxIUO7ss4zq3Od08paG6vAPAuTjYAI88/qmCuuROhbzBMK08R4M7u67+j7uClKa6/KedOsqNArzysM08QJ8UvMD8t7v5P7M799fIvAWx2jxiEi48ja6nPL0LFzxFkpq7LAWNPA1AQLyWlLO6qrfxvOGypTxJUau8aJ8uPceLnTtS0TG9omtgPO7xPDvzbfm7FfJWu2CqwzwAASk96FN4PLPgUbwRdhq8Vn9+PLk7wjs8NUW84yx9vHJCZjzysM079hodO/NbDL2BxrY6CE26OzpEpDv7DaM8y0quO41IIr1+Kte8QdMJvKlxDzy9+lI8hfyQPA3J9jzWmKS7z6O5u4a5vLtXKj088XzYO1fEtzwY4/e7Js1NugbCnjymxOu7906SvPSPAb1ieDO8dnjAu/EW0zp/b5C8mGIjvWTPWTwIxIM8YgFqPKvrZrwKpOA7/jK5O2vViDyfaXs8DR2Pu0AFGrvTc446IIOhvDreHrxRnTw8ROdbu55Gyrsht5Y8tVmAvHK5rzzZvTo8bx1QPMglmLvigBU8oIuDvAFYz7pblIw8OZnlOsTvPbxhzfS8BxnFOpkwE72E60w7cNp7utp6ZrtvHdC4uwmyO5dRX7sAm6M7kqEtvElRK7yWg++7JHanvM6ACDvrZqG8Xh+oupQsyTwkZWO8VzuBu5xVKbzEZoc7wB9pvA796zyZlpi8YbsHvQs+W7u9cZy8gKMFOxYDGzyu7Uu71KeDPJxVqbxwyI68VpDCu9VT67xKqFG7KWmtuvNteTocs0w7aJ8uPMUSbzz6cyg8MiwIPEtlfTo+wOA75tkgu7VZgDw8WPa8mGIjPKq38bsr0Zc7Ot4evNNiyju9C5c7YCENPP6pAj3uV8I7X3bOusfxIjvpZLy655bMvL9ivbxO3iu8NKbfPNe7VTz9ZMk88RZTu5QsybxeQtk7qpTAOzGSjTxSwO27mGIjPO7OC7x7FoW8wJayvI2uJzttxqk84H4wOUtlfbxblAw8uTtCPIO3Vzxkz9k8ENwfvfQYuLvHFNQ8LvatPF65ojzPLHA8+RyCvK3Kmjx27wk8Dcn2PARatDv3tBc8hkLzPEOz5jyQSoe8gU/tPMRmhzzp2wU90shPPBv2oLsNQMA8jTdevIftMTt/Xsw7MMQdPICjBT012tS7SLewvJBtuDuevZM8LyojPa6HxjtOAd07v9mGusZXqDoPqKo8qdeUvETnW7y5occ5pOSOvPPkwjsDN4O8Mk85vKnXlDtp06O7kZDpO6GuNDtRFAY9lAkYPGHNdDx2Afc7RRtROy5/5LyUoxI9mu0+u/dOEryrYrC867vivJp29TtVbZG8SVGrO0im7LnhsqU80frfPL/IwryBT+07/+/kPLZ8sTwoNbg7ZkiIOxadlbxlnUm68RbTuxkX7Tu/cwG7aqGTPO8CAbzTYsq6AIpfvA50tbzllOc7s3rMO0SBVjzXzJm8eZ3Wu4vgtzwPDrA8W6b5uwJpEzwLtaQ81pgkPJuqarxmro288369u48WkjwREBU9JP/dPJ69kzvw4t27h3bouxhrBbwrNx29F9EKPFmSJ7v8px08Tt6rvEJthLxon648UYz4u61TUTz4lPQ7ERAVuhwqFrzfSjs8RRtRO6lxD7zHelm87lfCu10O5LrXMh886YftvL9iPTxCf/E6MZKNOmAhDb2diZ47eRSgPBfRCrznlsw5MiwIvHW7FD3tI807uG3SPE7eqzx1VY864TtcO3zTMDw7EhS8c+0kPLr47TvUDQm8domEvEi3MLruaAa7tUi8u4FgsTwbkBu6pQfAvEJthLwDnQg8S1OQO55GSrxZLCK8nkZKvFXTFr01dM+8W6Z5vO+u6Luh0eW8rofGvFsdw7x7KHK8sN5svCFAzbo/0SS8f9UVu7Qli7wr0Re95E4FvSg1ODok/907AAGpPHQhGrwtS++71pgkvCtazjsSzcC7exYFPLVZgLzZmom7W6Z5PHr0fLtn9O86oUivukvcRrzjPcE8a8REPAei+zoBNZ685aUrPNBg5bqeIxk8FJuwPPdOkrtUOZy8GRftO4KD4rz/72Q7ERCVu8WJODy5O8I5L7NZuxJECjxFkpq8Uq4AOy2fh7wY9Du8GRdtu48o/7mHdug803MOvCUQIrw2hZM8v+tzvE54pruyI6a6exYFvDXrGDwNQEA8zyxwO7c53TwUJGe8Wk9Tu6ouu7yqCwo8vi7IvNe71TxB04m8domEvKTkDrzsidK8+nOovLfT1zr11eM7SVErO3EOcbzqMqw74Tvcut4WRrz5pbi8oznQvMi/Er0aS+I87l
fCvK+qdztd6zI83eJQPFy3vbyACQu9/8wzO/k/s7weG7e8906SPA3J9jw8NUU8TUQxPfEWU7wjH4E8J3gMPC72LTp6SJU8exaFOXBiibyf4MS6EXYaO3DIjjy61by7ACRaO5NvnTvMGB48Dw6wPFEUBr30j4E7niMZvIZC87s7EpS8OZnlPJZxgrxug9U7/DDUvNrxL7yV14e3E2c7PBdaQTwT8HE8oIuDPGIB6rvMB9o6cR+1OwbCHrylfgm8z6M5vIiqXbxFG1G8a9WIPItp7rpGT8Y838GEvAoK5jyAG3g7xRJvPPxBGLzJWQ28XYWtO85vRLp0IZq8cR81vc7mDb28PSe89LKyuig1uDyxEuK8GlwmPIbKgLwHGcW7/qkCvC8ZXzzSyE89F8BGOxPw8Tx+Ktc8BkvVurXiNryRkOk8jyj/OcKH0zp69Pw8apDPPFuUjLwPDrC8xuBeuD43KrxuYKQ7qXGPvF0OZDx1VQ88VVzNvD9rn7ushWE7EZlLvSL9+DrHi528dzXsu3k30bzeFka7hrm8vD3gAz1/Xsy80D20PNPZE7sorAG86WS8u2Y3xDtvHVC7PKwOO5DkAT3KOeo8c+0kvI+fyLuY61k8SKbsO4TrzLrrZqE87O9XvMkF9Tynb6q847SKvBjjdzyhSK88zTtPPNNzjjsvGV87UQPCvMD8t7stn4e7GRftPBQkZ7x4eiW7sqzcu3ufO7yAG3g8OHa0u0T4n7wcxJC7r6r3vAbCnrth3rg7BxnFumqQzzyXyCi8V8Q3vEPEqjyIu6E8Ac+YvGR6GLulkHY8um83PMqNgrv5pTi8N7kIPOhTeLy6TIY8B5COvDLGArvEzAy9IbcWvIUfQjxQ4BC7B/aTvCfwfrz15ie8ucR4PD1pursLtSS8AgMOOzIsiLv0srI7Q01hPCvRF7vySsg6O5tKunh6JTvCZCI7xuDevLc53btvLhQ8/pi+PJU9Dbugi4O8Qn/xvLpMhrth3ji8n/GIPKouu7tBS3y853MbPGAQyTt27wk7iokRO8d62bzZRnG7sN5svAG+1Lqvqve8JGXjur0Ll7tCf/E75/xRPIWFx7wgDNi8ucT4OZNvHb2nktu8qrfxuyR2J7zWh2A6juKcPDhlcLx/1RU9IAxYPGJ4szylB8C8qfrFO276HjuWcQK9QdOJvCUQIjzjo8a8SeslvBrCKztCf/E66MrBOx1eCz2Xt+Q66YdtvKg9mrrLSq47fFznO1uUjDsoNTg8QyqwuzH4Ejz/Zi67A8A5uKg9GrtFkhq862ahOzSmXzkMDEs8q+vmvNVkLzwc1n28mu0+vCbekTyCg+K7ekgVvO8CAT2yRtc8apBPu1b2R7zUp4M8VW2RvPc9zrx69Hw753ObvCcSB71sG+u8OwHQuv67b7zLSi65HrWxO0ZPRrxmwPq7t7CmPGxvAzygnfC8oIsDvKY7tbwZF+07p2+qvOnbhbv0oW47/2auuThlcDwIxIM8n/EIO6ijH7vHetk7uRiRPGUDT7pgh5I85shcPpGQabykShS7FWmgPPjojDvJ8wc8mlPEOY2uJzt7FoW7HNb9O7rVvDzKjQI80NcuuqvINbvNTBO8TgFdvEJ/cbzEZoe8SVGrvMvkqLyHdui7P2ufvBSbMDw0t6O82GaUPOLmGrxSNze8KVjpuwizPzwqjN48Xh8ovE4B3TtiAeo8azsOO8eLnbyO4py7x/GiPIvgNzzvi7c8BFq0O/dOEj1fU5282ZoJPCL9+LqyIyY8IoUGPNI/mbwKpGC7EkQKuzrN2jwVzyU7QpA1vLIjpjwi64s8HYE8u6eSW7yryLU8yK5OOzysjjwi6wu8GsIrOu7xPDwCaRO8dzVsPP/vZLwT3oQ8cQ7xvOJv0TtWBww8hlM3PBPeBDxT9OK71pgkPPSysrugiwO90GDlvHOHHz3xfNg8904SPVpglzzmP6a7Cgrmu9/BBLyH7bG85QsxvVSfIb2Xt2Q8paG6vOqYsTos9Mi8nqxPu8wHWjuYhdS7GAWAvCIOvTp/bxA8j7CMPG1P4Dxd67I7xxRUvOM9wbxMhwU9Kp0iPfF82LvQYOU6XkJZPBxNx7y0nX28B5COO8FT3rp4eiW8R/oEvSfw/jtC9rq8n/GIux3nQTw8WPY8LBf6uzSmXzzSPxm88rDNvDysDjwyPnW7tdFyPBLNwDo8WHa8bPi5vOO0CrylGAQ8YgFqvEFLfDy7LOO7TIeFPAHPmDv3YP+6/+9kPBKqjzt5rpo8VJ+hvE7eKzyc3t88P2sfvLQUR7wJ1vC6exaFvD6dr7zNO888i+A3ulwuhzuF/JC8gKMFveoyLLxqBxk7YgFquws+2zwOUYS8agcZvGJ4M71AjtC747QKvAizP73UH3a7LvatPJBtuLzEzIy8bG8DvJEHM75E59s7zbIYPObZIL2uZJW7WRveugblTzy6TIa802JKvD9rH7xlA088QAWavIFP7bwL2FW8vqWRu0ZgijyRkGm7ZGnUvIeHLD1c2m48THbBPPkcAr1NzWc8+JT0uulkvLvXMp+7lU96u7kYET1xhTo8e3wKvItGPTxb+hG87mgGPWqhk7uhrrQ73rBAPCbNTT13rDW8K8DTus8s8DsNt4k8gpQmPLES4ryyvSA8lcbDO60woDyLVwE9BFq0u+cNFj3C7Vi8UXoLPDYOyryQ0z083+S1Ox34hTzEzIw7pX4Ju6ouuzxIpmw8w5iXuylYaTy5sgu9Js3NOo+fyLyjFp+8MMSdvOROBb2n+OA7b7fKOeIJzDoNpkW8WsYct7SdfTxXxLc7TO2KO3YB9zynktu7OkSkPKnXFLvtRv47AJujuzGSDT0twjg8AgOOO4d26DvpZDy8lAkYPI5r0zcGS9W8OGXwu9xIVjyH7TG9IUDNuiqMXrwb9qA79I+BPL1xHLuVPY07MOfOO0ztCruvMoW8BuXPu4AbeLyIRNg8uG3SPO5XQjuFH0K8zm9EPEAoSz0tKL652ZqJOgABqbwsjsM8mlPEPLewpjsVWNw8OGXwOlYHjLzfwQQ81iFbOyJ0Qj3d85S7cQ7xvIqswjxKhSC7906SvAFYz72xiau8LAWNPB1eCz09jGu72ZoJPfDiXTwPDrA8CYGvvNH6XzxTa6y8+RwCvY8of7xxDnG8Ef/QvJ9p+zqh0eU8a16/OzBN1LyDLiE9PFh2u+0jTbxLUxA9ZZ3JvItXgbqL4Dc8BuXPvKnXFDzmPyY8k/hTOlum+bqAksG8OZnluPmluLxRnTy6/KcdvKAUOrzRcSm8fqEgPcTeebzeOXc8KCR0OnN2W7xRA0K8Wsacu+M9wToyLIi8mTATu21P4LuadvW8Dtq6vPmlODsjqLe88ieXPJEHszySoa08U/RiPNQNCbwb9qC8bG+DOXW7FL0OdLW7Tc3nvG8dULsAJNo7fNMwO7sJMr2O4hy85ZTnuwAkWjw+Nyq8rcoaO+8lsrvx86E8U/TivGUUkzp6SJW8lT0NvWz4uTzeFka6qguKvIKD4rt/1ZU8LBf6vD6dr7es/Ko7qWBLv
IlVHDxwUUU6Jt4RvRJEijnRcSk88235PGvVCL3zbfm8DaZFO+7xvLs3qES8oznQO9XKNDxZLKK8IIMhvComWb0CAw48fDk2O+nbBb29C5e8ogVbu1EUBryYhdS7OTPgOul1AD25sgs7i1cBPBYmzLtSroA8hfyQvP3bErz9h/o82ZoJO7/ZhjxtT+A8UZ28uzaFk7wJ1nA6dd7FPGg5Kbwb9iC8psRrvBXyVjzGRuS8uAfNu0+smzvFAAK96FN4vC2fhzy65oC7tgXou/9mLjxMELw8GSgxPRBlVjxDxCq80j8ZveinkDxHgzu70j8ZvPGNnDyPn0i8Vn9+urXR8ju10fI7sRJiPDBemLt8OTa8tJ39O4ne0rsaXKa7t0ohPHQhGrdYXjI824sqvDw1RT2/2YY8E/BxPIUOfjv9dQ08PM8/PMwYHrwwXpi7nqxPPM8aA7w+wOC7ROdbO79iPTxVbRE8U45dPOOjRjxwYok8ME1Uu1SfIbyifKQ8UXqLPI85wzsITTq8R+lAPMRVQzzcv58892B/Oqg9mjw3MXu7P9EkvM6AiLyx7zA8eHolPLYWLLugFLq8AJsjvEOzZjk6RKQ8uRgRPXVVjzw0HSk9PWk6PLss47spzzK93rBAvJpTxDun+OC7OTPgvEa1yzvAH+k5fZDcOid4jLuN0di8N7kIPPe0F7wVaSC8zxoDvJVgvrvUpwO9dd7FPKUHQLxn4oI7Ng7KPIydYzzZRvE8LTkCu3bvCTy10fK7QAWaPGHeOLu6+O27omvgO8Rmh7xrXj87AzeDvORg8jnGRuS8UEYWPLPg0TvYZpQ9FJuwPLC7O7xug1U8bvoevAnW8DvxFtM8kEoHPDxYdrzcWZq8n3q/O94nCjvZI0C82yUlvayWpbyHh6y7ME1UO9b+KTzbFGG89oCiPFpgFzzhTKA84gnMPKgsVjyia+C7XNpuPHxc5zyDLqG8ukyGvKqUQLwG5U88wB/pO+B+ML2O4py8MOdOPHt8irsDnYg6rv6PumJ4szzuV0I80qWePKTkDj14A9y8fqEgu9DXLjykbUU7yEhJvLYFaLyfVw68", + } + ], + "model": "text-embedding-ada-002-v2", + "usage": {"prompt_tokens": 6, "total_tokens": 6}, + }, + ], +} +STREAMED_RESPONSES_V1 = { + "Invalid API key.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "req_a78a2cb09e3c7f224e78bfbf0841e38a"}, + 401, + { + "error": { + "message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": None, + "code": "invalid_api_key", + } + }, + ], + "Model does not exist.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "req_a03714410fba92532c7de2532d8cf46c"}, + 404, + { + "error": { + "message": "The model `does-not-exist` does not exist", + "type": "invalid_request_error", + "param": None, + "code": "model_not_found", + } + }, + ], + "You are a scientist.": [ + { + "content-type": "text/event-stream", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "6326", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "198", + "x-ratelimit-remaining-tokens": "39880", + "x-ratelimit-reset-requests": "11m32.334s", + "x-ratelimit-reset-tokens": "180ms", + "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410ac", + }, + 200, + [ + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [ + {"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": None, "finish_reason": None} + ], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, 
"delta": {"content": " Fahrenheit"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " is"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " equal"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " to"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " "}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "100"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": " Celsius"}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {"content": "."}, "logprobs": None, "finish_reason": None}], + }, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion.chunk", + "created": 1707867026, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": None, + "choices": [{"index": 0, "delta": {}, "logprobs": None, "finish_reason": "stop"}], + }, + ], + ], +} +RESPONSES = { + "Invalid API key.": ( + {"Content-Type": "application/json; charset=utf-8", "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6"}, + 401, + { + "error": { + "message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": "null", + "code": "invalid_api_key", + } + }, + ), + "Embedded: Invalid API key.": ( + {"Content-Type": "application/json; charset=utf-8", "x-request-id": "4f8f61a7d0401e42a6760ea2ca2049f6"}, + 401, + { + "error": { + "message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": "null", + "code": "invalid_api_key", + } + }, + ), + "Model does not exist.": ( + { + "Content-Type": "application/json", + "x-request-id": "cfdf51fb795362ae578c12a21796262c", + }, + 404, + { + "error": { + "message": "The model `does-not-exist` does not exist", + "type": "invalid_request_error", + "param": "null", + "code": "model_not_found", + } + }, + ), + "No usage data": [ + { + "content-type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "6326", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-limit-tokens_usage_based": "40000", + "x-ratelimit-remaining-requests": "198", + "x-ratelimit-remaining-tokens": "39880", + "x-ratelimit-remaining-tokens_usage_based": "39880", + "x-ratelimit-reset-requests": "11m32.334s", + "x-ratelimit-reset-tokens": "180ms", + "x-ratelimit-reset-tokens_usage_based": "180ms", + "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410ac", + }, + 200, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "object": "chat.completion", + "created": 1701995833, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + }, + "finish_reason": "stop", + } + ], + "usage": None, + "system_fingerprint": None, + }, + ], + "This is an embedding test.": ( + { + "Content-Type": "application/json", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "54", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "150000", + "x-ratelimit-remaining-requests": "197", + "x-ratelimit-remaining-tokens": "149994", + "x-ratelimit-reset-requests": "19m45.394s", + "x-ratelimit-reset-tokens": "2ms", + "x-request-id": "c70828b2293314366a76a2b1dcb20688", + }, + 200, + { + "data": [ + { + "embedding": 
"SLewvFF6iztXKj07UOCQO41IorspWOk79KHuu12FrbwjqLe8FCTnvBKqj7sz6bM8qqUEvFSfITpPrJu7uOSbPM8agzyYYqM7YJl/PBF2mryNN967uRiRO9lGcbszcuq7RZIavAnnNLwWA5s8mnb1vG+UGTyqpYS846PGO2M1X7wIxAO8HfgFvc8s8LuQXPQ5qgsKPOinEL15ndY8/MrOu1LRMTxCbQS7PEYJOyMx7rwDJj+79dVjO5P4UzmoPZq8jUgivL36UjzA/Lc8Jt6Ru4bKAL1jRiM70i5VO4neUjwneAy7mlNEPBVpoDuayo28TO2KvAmBrzzwvyy8B3/KO0ZgCry3sKa6QTmPO0a1Szz46Iw87AAcPF0O5DyJVZw8Ac+Yu1y3Pbqzesw8DUDAuq8hQbyALLy7TngmPL6lETxXxLc6TzXSvKJrYLy309c8OHa0OU3NZ7vru2K8mIXUPCxrErxLU5C5s/EVPI+wjLp7BcE74TvcO+2aFrx4A9w80j+Zu/aAojwmzU08k/hTvBpL4rvHFFQ76YftutrxL7wyxgK9BsIevLkYkTq4B028OZnlPPkcgjxhzfS79oCiuB34BbwITTq97nrzOugwRzwGS1U7CqTgvFxROLx4aWG7E/DxPA3J9jwd+AU8dVWPvGlc2jzwWae57nrzu569E72GU7e8Vn9+vFLA7TtVbZE8eOCqPG+3Sjxr5/W8s+DRPE+sm7wFKKQ8A8A5vUSBVryeIxk8hsqAPAeQjryeIxm8gU/tuxVpoDxVXM250GDlOlEDwjs0t6O8Tt6rOVrGHLvmyFy6dhI7PLPxlbv3YP88B/YTPEZgCrxqKsq8Xh+ou96wQLp5rpo8LSg+vL63/rsFjqk8E/DxPEi3MDzTcw66PjcqPNgSfLwqnaK85QuxPI7iHL2+pRE8Z+ICOxzEELvph+07jHqyu2ltnrwNQMC82BL8vAOdiDwSqo88CLM/PCKFBrzmP6a85Nc7PBaM0bvh1VY7NB2pvMkF9Tx3New87mgGPAoKZjo+nS+/Rk/GucqwMz3fwYS8yrCzPMo56jyDHV08XLe9vB4+aLwXwMY8dVUPvCFATbx2eMC8V7NzvEnrpTsIxIO7yVmNu2lc2ryGQnM8A6/1PH/VFbySO6g80i5VPOY/prv6cyi7W5QMPJVP+jsyLIi84H6wPKM50DrZNIS8UEaWPPrIaTzvrmg8rcoaPRuQm7ysH9y8OxIUO7ss4zq3Od08paG6vAPAuTjYAI88/qmCuuROhbzBMK08R4M7u67+j7uClKa6/KedOsqNArzysM08QJ8UvMD8t7v5P7M799fIvAWx2jxiEi48ja6nPL0LFzxFkpq7LAWNPA1AQLyWlLO6qrfxvOGypTxJUau8aJ8uPceLnTtS0TG9omtgPO7xPDvzbfm7FfJWu2CqwzwAASk96FN4PLPgUbwRdhq8Vn9+PLk7wjs8NUW84yx9vHJCZjzysM079hodO/NbDL2BxrY6CE26OzpEpDv7DaM8y0quO41IIr1+Kte8QdMJvKlxDzy9+lI8hfyQPA3J9jzWmKS7z6O5u4a5vLtXKj088XzYO1fEtzwY4/e7Js1NugbCnjymxOu7906SvPSPAb1ieDO8dnjAu/EW0zp/b5C8mGIjvWTPWTwIxIM8YgFqPKvrZrwKpOA7/jK5O2vViDyfaXs8DR2Pu0AFGrvTc446IIOhvDreHrxRnTw8ROdbu55Gyrsht5Y8tVmAvHK5rzzZvTo8bx1QPMglmLvigBU8oIuDvAFYz7pblIw8OZnlOsTvPbxhzfS8BxnFOpkwE72E60w7cNp7utp6ZrtvHdC4uwmyO5dRX7sAm6M7kqEtvElRK7yWg++7JHanvM6ACDvrZqG8Xh+oupQsyTwkZWO8VzuBu5xVKbzEZoc7wB9pvA796zyZlpi8YbsHvQs+W7u9cZy8gKMFOxYDGzyu7Uu71KeDPJxVqbxwyI68VpDCu9VT67xKqFG7KWmtuvNteTocs0w7aJ8uPMUSbzz6cyg8MiwIPEtlfTo+wOA75tkgu7VZgDw8WPa8mGIjPKq38bsr0Zc7Ot4evNNiyju9C5c7YCENPP6pAj3uV8I7X3bOusfxIjvpZLy655bMvL9ivbxO3iu8NKbfPNe7VTz9ZMk88RZTu5QsybxeQtk7qpTAOzGSjTxSwO27mGIjPO7OC7x7FoW8wJayvI2uJzttxqk84H4wOUtlfbxblAw8uTtCPIO3Vzxkz9k8ENwfvfQYuLvHFNQ8LvatPF65ojzPLHA8+RyCvK3Kmjx27wk8Dcn2PARatDv3tBc8hkLzPEOz5jyQSoe8gU/tPMRmhzzp2wU90shPPBv2oLsNQMA8jTdevIftMTt/Xsw7MMQdPICjBT012tS7SLewvJBtuDuevZM8LyojPa6HxjtOAd07v9mGusZXqDoPqKo8qdeUvETnW7y5occ5pOSOvPPkwjsDN4O8Mk85vKnXlDtp06O7kZDpO6GuNDtRFAY9lAkYPGHNdDx2Afc7RRtROy5/5LyUoxI9mu0+u/dOEryrYrC867vivJp29TtVbZG8SVGrO0im7LnhsqU80frfPL/IwryBT+07/+/kPLZ8sTwoNbg7ZkiIOxadlbxlnUm68RbTuxkX7Tu/cwG7aqGTPO8CAbzTYsq6AIpfvA50tbzllOc7s3rMO0SBVjzXzJm8eZ3Wu4vgtzwPDrA8W6b5uwJpEzwLtaQ81pgkPJuqarxmro288369u48WkjwREBU9JP/dPJ69kzvw4t27h3bouxhrBbwrNx29F9EKPFmSJ7v8px08Tt6rvEJthLxon648UYz4u61TUTz4lPQ7ERAVuhwqFrzfSjs8RRtRO6lxD7zHelm87lfCu10O5LrXMh886YftvL9iPTxCf/E6MZKNOmAhDb2diZ47eRSgPBfRCrznlsw5MiwIvHW7FD3tI807uG3SPE7eqzx1VY864TtcO3zTMDw7EhS8c+0kPLr47TvUDQm8domEvEi3MLruaAa7tUi8u4FgsTwbkBu6pQfAvEJthLwDnQg8S1OQO55GSrxZLCK8nkZKvFXTFr01dM+8W6Z5vO+u6Luh0eW8rofGvFsdw7x7KHK8sN5svCFAzbo/0SS8f9UVu7Qli7wr0Re95E4FvSg1ODok/907AAGpPHQhGrwtS++71pgkvCtazjsSzcC7exYFPLVZgLzZmom7W6Z5PHr0fLtn9O86oUivukvcRrzjPcE8a8REPAei+zoBNZ685aUrPNBg5bqeIxk8FJuwPPdOkrtUOZy8GRftO4KD4rz/72Q7ERCVu8WJODy5O8I5L7NZuxJECjxFkpq8Uq4AOy2fh7wY9Du8GRdtu48o/7mHdug803MOvCUQIrw2hZM8v+tzvE54pruyI6a6exYFvDXrGDwNQEA8zyxwO7c53TwUJGe8Wk9Tu6ouu7yqCwo8vi7IvNe71TxB04m8domEvKTkDrzsidK8+nOovLfT1zr11eM7SVErO3EOcbzqMqw74Tvcut4WRrz5pbi8oznQvMi/Er0aS+I87l
fCvK+qdztd6zI83eJQPFy3vbyACQu9/8wzO/k/s7weG7e8906SPA3J9jw8NUU8TUQxPfEWU7wjH4E8J3gMPC72LTp6SJU8exaFOXBiibyf4MS6EXYaO3DIjjy61by7ACRaO5NvnTvMGB48Dw6wPFEUBr30j4E7niMZvIZC87s7EpS8OZnlPJZxgrxug9U7/DDUvNrxL7yV14e3E2c7PBdaQTwT8HE8oIuDPGIB6rvMB9o6cR+1OwbCHrylfgm8z6M5vIiqXbxFG1G8a9WIPItp7rpGT8Y838GEvAoK5jyAG3g7xRJvPPxBGLzJWQ28XYWtO85vRLp0IZq8cR81vc7mDb28PSe89LKyuig1uDyxEuK8GlwmPIbKgLwHGcW7/qkCvC8ZXzzSyE89F8BGOxPw8Tx+Ktc8BkvVurXiNryRkOk8jyj/OcKH0zp69Pw8apDPPFuUjLwPDrC8xuBeuD43KrxuYKQ7qXGPvF0OZDx1VQ88VVzNvD9rn7ushWE7EZlLvSL9+DrHi528dzXsu3k30bzeFka7hrm8vD3gAz1/Xsy80D20PNPZE7sorAG86WS8u2Y3xDtvHVC7PKwOO5DkAT3KOeo8c+0kvI+fyLuY61k8SKbsO4TrzLrrZqE87O9XvMkF9Tynb6q847SKvBjjdzyhSK88zTtPPNNzjjsvGV87UQPCvMD8t7stn4e7GRftPBQkZ7x4eiW7sqzcu3ufO7yAG3g8OHa0u0T4n7wcxJC7r6r3vAbCnrth3rg7BxnFumqQzzyXyCi8V8Q3vEPEqjyIu6E8Ac+YvGR6GLulkHY8um83PMqNgrv5pTi8N7kIPOhTeLy6TIY8B5COvDLGArvEzAy9IbcWvIUfQjxQ4BC7B/aTvCfwfrz15ie8ucR4PD1pursLtSS8AgMOOzIsiLv0srI7Q01hPCvRF7vySsg6O5tKunh6JTvCZCI7xuDevLc53btvLhQ8/pi+PJU9Dbugi4O8Qn/xvLpMhrth3ji8n/GIPKouu7tBS3y853MbPGAQyTt27wk7iokRO8d62bzZRnG7sN5svAG+1Lqvqve8JGXjur0Ll7tCf/E75/xRPIWFx7wgDNi8ucT4OZNvHb2nktu8qrfxuyR2J7zWh2A6juKcPDhlcLx/1RU9IAxYPGJ4szylB8C8qfrFO276HjuWcQK9QdOJvCUQIjzjo8a8SeslvBrCKztCf/E66MrBOx1eCz2Xt+Q66YdtvKg9mrrLSq47fFznO1uUjDsoNTg8QyqwuzH4Ejz/Zi67A8A5uKg9GrtFkhq862ahOzSmXzkMDEs8q+vmvNVkLzwc1n28mu0+vCbekTyCg+K7ekgVvO8CAT2yRtc8apBPu1b2R7zUp4M8VW2RvPc9zrx69Hw753ObvCcSB71sG+u8OwHQuv67b7zLSi65HrWxO0ZPRrxmwPq7t7CmPGxvAzygnfC8oIsDvKY7tbwZF+07p2+qvOnbhbv0oW47/2auuThlcDwIxIM8n/EIO6ijH7vHetk7uRiRPGUDT7pgh5I85shcPpGQabykShS7FWmgPPjojDvJ8wc8mlPEOY2uJzt7FoW7HNb9O7rVvDzKjQI80NcuuqvINbvNTBO8TgFdvEJ/cbzEZoe8SVGrvMvkqLyHdui7P2ufvBSbMDw0t6O82GaUPOLmGrxSNze8KVjpuwizPzwqjN48Xh8ovE4B3TtiAeo8azsOO8eLnbyO4py7x/GiPIvgNzzvi7c8BFq0O/dOEj1fU5282ZoJPCL9+LqyIyY8IoUGPNI/mbwKpGC7EkQKuzrN2jwVzyU7QpA1vLIjpjwi64s8HYE8u6eSW7yryLU8yK5OOzysjjwi6wu8GsIrOu7xPDwCaRO8dzVsPP/vZLwT3oQ8cQ7xvOJv0TtWBww8hlM3PBPeBDxT9OK71pgkPPSysrugiwO90GDlvHOHHz3xfNg8904SPVpglzzmP6a7Cgrmu9/BBLyH7bG85QsxvVSfIb2Xt2Q8paG6vOqYsTos9Mi8nqxPu8wHWjuYhdS7GAWAvCIOvTp/bxA8j7CMPG1P4Dxd67I7xxRUvOM9wbxMhwU9Kp0iPfF82LvQYOU6XkJZPBxNx7y0nX28B5COO8FT3rp4eiW8R/oEvSfw/jtC9rq8n/GIux3nQTw8WPY8LBf6uzSmXzzSPxm88rDNvDysDjwyPnW7tdFyPBLNwDo8WHa8bPi5vOO0CrylGAQ8YgFqvEFLfDy7LOO7TIeFPAHPmDv3YP+6/+9kPBKqjzt5rpo8VJ+hvE7eKzyc3t88P2sfvLQUR7wJ1vC6exaFvD6dr7zNO888i+A3ulwuhzuF/JC8gKMFveoyLLxqBxk7YgFquws+2zwOUYS8agcZvGJ4M71AjtC747QKvAizP73UH3a7LvatPJBtuLzEzIy8bG8DvJEHM75E59s7zbIYPObZIL2uZJW7WRveugblTzy6TIa802JKvD9rH7xlA088QAWavIFP7bwL2FW8vqWRu0ZgijyRkGm7ZGnUvIeHLD1c2m48THbBPPkcAr1NzWc8+JT0uulkvLvXMp+7lU96u7kYET1xhTo8e3wKvItGPTxb+hG87mgGPWqhk7uhrrQ73rBAPCbNTT13rDW8K8DTus8s8DsNt4k8gpQmPLES4ryyvSA8lcbDO60woDyLVwE9BFq0u+cNFj3C7Vi8UXoLPDYOyryQ0z083+S1Ox34hTzEzIw7pX4Ju6ouuzxIpmw8w5iXuylYaTy5sgu9Js3NOo+fyLyjFp+8MMSdvOROBb2n+OA7b7fKOeIJzDoNpkW8WsYct7SdfTxXxLc7TO2KO3YB9zynktu7OkSkPKnXFLvtRv47AJujuzGSDT0twjg8AgOOO4d26DvpZDy8lAkYPI5r0zcGS9W8OGXwu9xIVjyH7TG9IUDNuiqMXrwb9qA79I+BPL1xHLuVPY07MOfOO0ztCruvMoW8BuXPu4AbeLyIRNg8uG3SPO5XQjuFH0K8zm9EPEAoSz0tKL652ZqJOgABqbwsjsM8mlPEPLewpjsVWNw8OGXwOlYHjLzfwQQ81iFbOyJ0Qj3d85S7cQ7xvIqswjxKhSC7906SvAFYz72xiau8LAWNPB1eCz09jGu72ZoJPfDiXTwPDrA8CYGvvNH6XzxTa6y8+RwCvY8of7xxDnG8Ef/QvJ9p+zqh0eU8a16/OzBN1LyDLiE9PFh2u+0jTbxLUxA9ZZ3JvItXgbqL4Dc8BuXPvKnXFDzmPyY8k/hTOlum+bqAksG8OZnluPmluLxRnTy6/KcdvKAUOrzRcSm8fqEgPcTeebzeOXc8KCR0OnN2W7xRA0K8Wsacu+M9wToyLIi8mTATu21P4LuadvW8Dtq6vPmlODsjqLe88ieXPJEHszySoa08U/RiPNQNCbwb9qC8bG+DOXW7FL0OdLW7Tc3nvG8dULsAJNo7fNMwO7sJMr2O4hy85ZTnuwAkWjw+Nyq8rcoaO+8lsrvx86E8U/TivGUUkzp6SJW8lT0NvWz4uTzeFka6qguKvIKD4rt/1ZU8LBf6vD6dr7es/Ko7qWBLv
IlVHDxwUUU6Jt4RvRJEijnRcSk88235PGvVCL3zbfm8DaZFO+7xvLs3qES8oznQO9XKNDxZLKK8IIMhvComWb0CAw48fDk2O+nbBb29C5e8ogVbu1EUBryYhdS7OTPgOul1AD25sgs7i1cBPBYmzLtSroA8hfyQvP3bErz9h/o82ZoJO7/ZhjxtT+A8UZ28uzaFk7wJ1nA6dd7FPGg5Kbwb9iC8psRrvBXyVjzGRuS8uAfNu0+smzvFAAK96FN4vC2fhzy65oC7tgXou/9mLjxMELw8GSgxPRBlVjxDxCq80j8ZveinkDxHgzu70j8ZvPGNnDyPn0i8Vn9+urXR8ju10fI7sRJiPDBemLt8OTa8tJ39O4ne0rsaXKa7t0ohPHQhGrdYXjI824sqvDw1RT2/2YY8E/BxPIUOfjv9dQ08PM8/PMwYHrwwXpi7nqxPPM8aA7w+wOC7ROdbO79iPTxVbRE8U45dPOOjRjxwYok8ME1Uu1SfIbyifKQ8UXqLPI85wzsITTq8R+lAPMRVQzzcv58892B/Oqg9mjw3MXu7P9EkvM6AiLyx7zA8eHolPLYWLLugFLq8AJsjvEOzZjk6RKQ8uRgRPXVVjzw0HSk9PWk6PLss47spzzK93rBAvJpTxDun+OC7OTPgvEa1yzvAH+k5fZDcOid4jLuN0di8N7kIPPe0F7wVaSC8zxoDvJVgvrvUpwO9dd7FPKUHQLxn4oI7Ng7KPIydYzzZRvE8LTkCu3bvCTy10fK7QAWaPGHeOLu6+O27omvgO8Rmh7xrXj87AzeDvORg8jnGRuS8UEYWPLPg0TvYZpQ9FJuwPLC7O7xug1U8bvoevAnW8DvxFtM8kEoHPDxYdrzcWZq8n3q/O94nCjvZI0C82yUlvayWpbyHh6y7ME1UO9b+KTzbFGG89oCiPFpgFzzhTKA84gnMPKgsVjyia+C7XNpuPHxc5zyDLqG8ukyGvKqUQLwG5U88wB/pO+B+ML2O4py8MOdOPHt8irsDnYg6rv6PumJ4szzuV0I80qWePKTkDj14A9y8fqEgu9DXLjykbUU7yEhJvLYFaLyfVw68", + "index": 0, + "object": "embedding", + } + ], + "model": "text-embedding-ada-002-v2", + "object": "list", + "usage": {"prompt_tokens": 6, "total_tokens": 6}, + }, + ), + "You are a scientist.": ( + { + "Content-Type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "1469", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "199", + "x-ratelimit-remaining-tokens": "39940", + "x-ratelimit-reset-requests": "7m12s", + "x-ratelimit-reset-tokens": "90ms", + "x-request-id": "49dbbffbd3c3f4612aa48def69059ccd", + }, + 200, + { + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "212 degrees " "Fahrenheit is " "equal to 100 " "degrees " "Celsius.", + "role": "assistant", + }, + } + ], + "created": 1696888863, + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", + "model": "gpt-3.5-turbo-0613", + "object": "chat.completion", + "usage": {"completion_tokens": 11, "prompt_tokens": 53, "total_tokens": 64}, + }, + ), + "You are a mathematician.": ( + { + "Content-Type": "application/json", + "openai-model": "gpt-3.5-turbo-0613", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "1469", + "openai-version": "2020-10-01", + "x-ratelimit-limit-requests": "200", + "x-ratelimit-limit-tokens": "40000", + "x-ratelimit-remaining-requests": "199", + "x-ratelimit-remaining-tokens": "39940", + "x-ratelimit-reset-requests": "7m12s", + "x-ratelimit-reset-tokens": "90ms", + "x-request-id": "49dbbffbd3c3f4612aa48def69059aad", + }, + 200, + { + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "1 plus 2 is 3.", + "role": "assistant", + }, + } + ], + "created": 1696888865, + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat", + "model": "gpt-3.5-turbo-0613", + "object": "chat.completion", + "usage": {"completion_tokens": 11, "prompt_tokens": 53, "total_tokens": 64}, + }, + ), +} + + +@pytest.fixture(scope="session") +def simple_get(openai_version, extract_shortened_prompt): + def _simple_get(self): + content_len = int(self.headers.get("content-length")) + content = json.loads(self.rfile.read(content_len).decode("utf-8")) + stream = content.get("stream", False) + prompt = extract_shortened_prompt(content) + if not prompt: + self.send_response(500) + self.end_headers() + self.wfile.write("Could 
not parse prompt.".encode("utf-8"))
+            return
+
+        headers, response = ({}, "")
+
+        if openai_version < (1, 0):
+            mocked_responses = RESPONSES
+            if stream:
+                mocked_responses = STREAMED_RESPONSES
+        else:
+            mocked_responses = RESPONSES_V1
+            if stream:
+                mocked_responses = STREAMED_RESPONSES_V1
+
+        for k, v in mocked_responses.items():
+            if prompt.startswith(k):
+                headers, status_code, response = v
+                break
+        else:  # If no matches found
+            self.send_response(500)
+            self.end_headers()
+            self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8"))
+            return
+
+        # Send response code
+        self.send_response(status_code)
+
+        # Send headers
+        for k, v in headers.items():
+            self.send_header(k, v)
+        self.end_headers()
+
+        # Send response body
+        if stream and status_code < 400:
+            for resp in response:
+                data = json.dumps(resp).encode("utf-8")
+                if prompt == "Stream parsing error.":
+                    # Force a parsing error by writing an invalid streamed response.
+                    self.wfile.write(b"data: %s" % data)
+                else:
+                    self.wfile.write(b"data: %s\n\n" % data)
+        else:
+            self.wfile.write(json.dumps(response).encode("utf-8"))
+        return
+
+    return _simple_get
+
+
+@pytest.fixture(scope="session")
+def MockExternalOpenAIServer(simple_get):
+    class _MockExternalOpenAIServer(MockExternalHTTPServer):
+        # To use this class in a test, start and stop this server before and
+        # after making requests to the test app that makes the external calls.
+
+        def __init__(self, handler=simple_get, port=None, *args, **kwargs):
+            super(_MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs)
+
+    return _MockExternalOpenAIServer
+
+
+@pytest.fixture(scope="session")
+def extract_shortened_prompt(openai_version):
+    def _extract_shortened_prompt(content):
+        if openai_version < (1, 0):
+            prompt = content.get("prompt", None) or content.get("input", None) or content.get("messages")[0]["content"]
+        else:
+            prompt = content.get("input", None) or content.get("messages")[0]["content"]
+        return prompt
+
+    return _extract_shortened_prompt
+
+
+def get_openai_version():
+    # Import OpenAI so that get_package_version_tuple can capture the version
+    # from the system module. OpenAI does not have a package version in v0.
+    import openai  # noqa: F401; pylint: disable=W0611
+
+    return get_package_version_tuple("openai")
+
+
+@pytest.fixture(scope="session")
+def openai_version():
+    return get_openai_version()
+
+
+if __name__ == "__main__":
+    # Run the mock server standalone for manual inspection.
+    with MockExternalOpenAIServer() as server:
+        print("MockExternalOpenAIServer serving on port %s" % str(server.port))
+        while True:
+            pass  # Serve forever
diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py
new file mode 100644
index 000000000..0a9f531e1
--- /dev/null
+++ b/tests/mlmodel_openai/conftest.py
@@ -0,0 +1,371 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
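+#
+# Fixtures below default to the mocked OpenAI backend defined in
+# _mock_external_openai_server.py; see the openai_server fixture for the
+# NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES toggle that records real responses.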
+
+import json
+import os
+
+import pytest
+from _mock_external_openai_server import (  # noqa: F401; pylint: disable=W0611
+    MockExternalOpenAIServer,
+    extract_shortened_prompt,
+    get_openai_version,
+    openai_version,
+    simple_get,
+)
+from testing_support.fixture.event_loop import (  # noqa: F401; pylint: disable=W0611
+    event_loop as loop,
+)
+from testing_support.fixtures import (  # noqa: F401; pylint: disable=W0611
+    collector_agent_registration_fixture,
+    collector_available_fixture,
+    override_application_settings,
+)
+
+from newrelic.api.transaction import current_transaction
+from newrelic.common.object_wrapper import ObjectProxy, wrap_function_wrapper
+from newrelic.common.signature import bind_args
+
+_default_settings = {
+    "transaction_tracer.explain_threshold": 0.0,
+    "transaction_tracer.transaction_threshold": 0.0,
+    "transaction_tracer.stack_trace_threshold": 0.0,
+    "debug.log_data_collector_payloads": True,
+    "debug.record_transaction_failure": True,
+    "ml_insights_events.enabled": True,
+    "ai_monitoring.enabled": True,
+}
+
+collector_agent_registration = collector_agent_registration_fixture(
+    app_name="Python Agent Test (mlmodel_openai)",
+    default_settings=_default_settings,
+    linked_applications=["Python Agent Test (mlmodel_openai)"],
+)
+
+if get_openai_version() < (1, 0):
+    collect_ignore = [
+        "test_chat_completion_v1.py",
+        "test_chat_completion_error_v1.py",
+        "test_embeddings_v1.py",
+        "test_embeddings_error_v1.py",
+        "test_chat_completion_stream_v1.py",
+        "test_chat_completion_stream_error_v1.py",
+        "test_embeddings_stream_v1.py",
+    ]
+else:
+    collect_ignore = [
+        "test_embeddings.py",
+        "test_embeddings_error.py",
+        "test_chat_completion.py",
+        "test_chat_completion_error.py",
+        "test_chat_completion_stream.py",
+        "test_chat_completion_stream_error.py",
+    ]
+
+
+OPENAI_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "openai_audit.log")
+OPENAI_AUDIT_LOG_CONTENTS = {}
+# Subset of headers to record when intercepting outgoing requests for mocking.
+RECORDED_HEADERS = set(["x-request-id", "content-type"])
+
+
+@pytest.fixture(scope="session")
+def openai_clients(openai_version, MockExternalOpenAIServer):  # noqa: F811
+    """
+    For openai v1, configure and return a (sync, async) client pair; for v0
+    there is no client object, so this only configures the openai module.
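+
+    Illustrative v1 usage (an assumed example, not part of this fixture's
+    contract; the mock server only recognizes the prompts recorded in
+    _mock_external_openai_server.py):
+
+        def test_example(sync_openai_client):
+            sync_openai_client.chat.completions.create(
+                model="gpt-3.5-turbo",
+                messages=[{"role": "user", "content": "You are a scientist."}],
+            )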
+    """
+    import openai
+
+    from newrelic.core.config import _environ_as_bool
+
+    if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False):
+        with MockExternalOpenAIServer() as server:
+            if openai_version < (1, 0):
+                openai.api_base = "http://localhost:%d" % server.port
+                openai.api_key = "NOT-A-REAL-SECRET"
+                yield
+            else:
+                openai_sync = openai.OpenAI(
+                    base_url="http://localhost:%d" % server.port,
+                    api_key="NOT-A-REAL-SECRET",
+                )
+                openai_async = openai.AsyncOpenAI(
+                    base_url="http://localhost:%d" % server.port,
+                    api_key="NOT-A-REAL-SECRET",
+                )
+                yield (openai_sync, openai_async)
+    else:
+        openai_api_key = os.environ.get("OPENAI_API_KEY")
+        if not openai_api_key:
+            raise RuntimeError("OPENAI_API_KEY environment variable required.")
+
+        if openai_version < (1, 0):
+            openai.api_key = openai_api_key
+            yield
+        else:
+            openai_sync = openai.OpenAI(
+                api_key=openai_api_key,
+            )
+            openai_async = openai.AsyncOpenAI(
+                api_key=openai_api_key,
+            )
+            yield (openai_sync, openai_async)
+
+
+@pytest.fixture(scope="session")
+def sync_openai_client(openai_clients):
+    sync_client, _ = openai_clients
+    return sync_client
+
+
+@pytest.fixture(scope="session")
+def async_openai_client(openai_clients):
+    _, async_client = openai_clients
+    return async_client
+
+
+@pytest.fixture(autouse=True, scope="session")
+def openai_server(
+    openai_version,  # noqa: F811
+    openai_clients,
+    wrap_openai_api_requestor_request,
+    wrap_openai_api_requestor_interpret_response,
+    wrap_httpx_client_send,
+    wrap_engine_api_resource_create,
+    wrap_stream_iter_events,
+):
+    """
+    This fixture either creates a mocked backend for testing purposes, or
+    records the responses of the real OpenAI backend to an audit log file.
+    Set the environment variable NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES=1
+    to run against the real OpenAI backend. (Default: mocking)
+    """
+    from newrelic.core.config import _environ_as_bool
+
+    if _environ_as_bool("NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES", False):
+        if openai_version < (1, 0):
+            # Apply function wrappers to record data
+            wrap_function_wrapper("openai.api_requestor", "APIRequestor.request", wrap_openai_api_requestor_request)
+            wrap_function_wrapper(
+                "openai.api_requestor", "APIRequestor._interpret_response", wrap_openai_api_requestor_interpret_response
+            )
+            wrap_function_wrapper(
+                "openai.api_resources.abstract.engine_api_resource",
+                "EngineAPIResource.create",
+                wrap_engine_api_resource_create,
+            )
+            yield  # Run tests
+        else:
+            # Apply function wrappers to record data
+            wrap_function_wrapper("httpx._client", "Client.send", wrap_httpx_client_send)
+            wrap_function_wrapper(
+                "openai._streaming",
+                "Stream._iter_events",
+                wrap_stream_iter_events,
+            )
+            yield  # Run tests
+        # Write responses to audit log
+        with open(OPENAI_AUDIT_LOG_FILE, "w") as audit_log_fp:
+            json.dump(OPENAI_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4)
+    else:
+        # We are mocking openai responses, so nothing needs to happen here.
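+        # Example (assumed workflow): to re-record the mocked responses, run
+        #   NEW_RELIC_TESTING_RECORD_OPENAI_RESPONSES=1 OPENAI_API_KEY=... pytest tests/mlmodel_openai
+        # and paste the resulting openai_audit.log contents back into the
+        # fixtures in _mock_external_openai_server.py.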
+        yield
+
+
+@pytest.fixture(scope="session")
+def wrap_httpx_client_send(extract_shortened_prompt):  # noqa: F811
+    def _wrap_httpx_client_send(wrapped, instance, args, kwargs):
+        bound_args = bind_args(wrapped, args, kwargs)
+        stream = bound_args.get("stream", False)
+        request = bound_args["request"]
+        if not request:
+            return wrapped(*args, **kwargs)
+
+        params = json.loads(request.content.decode("utf-8"))
+        prompt = extract_shortened_prompt(params)
+
+        # Send request
+        response = wrapped(*args, **kwargs)
+
+        if response.status_code >= 400 or response.status_code < 200:
+            prompt = "error"
+
+        rheaders = response.headers
+
+        headers = dict(
+            filter(
+                lambda k: k[0].lower() in RECORDED_HEADERS
+                or k[0].lower().startswith("openai")
+                or k[0].lower().startswith("x-ratelimit"),
+                rheaders.items(),
+            )
+        )
+        if stream:
+            # Initialize the log entry; streamed chunks are appended as they arrive.
+            OPENAI_AUDIT_LOG_CONTENTS[prompt] = [headers, response.status_code, []]
+            if prompt == "error":
+                OPENAI_AUDIT_LOG_CONTENTS[prompt][2] = json.loads(response.read())
+        else:
+            body = json.loads(response.content.decode("utf-8"))
+            OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, response.status_code, body  # Record response data in the log
+        return response
+
+    return _wrap_httpx_client_send
+
+
+@pytest.fixture(scope="session")
+def wrap_openai_api_requestor_interpret_response():
+    def _wrap_openai_api_requestor_interpret_response(wrapped, instance, args, kwargs):
+        rbody, rcode, rheaders = bind_request_interpret_response_params(*args, **kwargs)
+        headers = dict(
+            filter(
+                lambda k: k[0].lower() in RECORDED_HEADERS
+                or k[0].lower().startswith("openai")
+                or k[0].lower().startswith("x-ratelimit"),
+                rheaders.items(),
+            )
+        )
+
+        if rcode >= 400 or rcode < 200:
+            rbody = json.loads(rbody)
+            OPENAI_AUDIT_LOG_CONTENTS["error"] = headers, rcode, rbody  # Record error response in the audit log
+        return wrapped(*args, **kwargs)
+
+    return _wrap_openai_api_requestor_interpret_response
+
+
+@pytest.fixture(scope="session")
+def wrap_openai_api_requestor_request(extract_shortened_prompt):  # noqa: F811
+    def _wrap_openai_api_requestor_request(wrapped, instance, args, kwargs):
+        params = bind_request_params(*args, **kwargs)
+        if not params:
+            return wrapped(*args, **kwargs)
+
+        prompt = extract_shortened_prompt(params)
+
+        # Send request
+        result = wrapped(*args, **kwargs)
+
+        # Record response data in the audit log
+        if not kwargs.get("stream", False):
+            # Clean up data
+            data = result[0].data
+            headers = result[0]._headers
+            headers = dict(
+                filter(
+                    lambda k: k[0].lower() in RECORDED_HEADERS
+                    or k[0].lower().startswith("openai")
+                    or k[0].lower().startswith("x-ratelimit"),
+                    headers.items(),
+                )
+            )
+            OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, 200, data
+        else:
+            # Initialize the log entry; streamed chunks are appended by GeneratorProxy below.
+            OPENAI_AUDIT_LOG_CONTENTS[prompt] = [None, 200, []]
+        return result
+
+    return _wrap_openai_api_requestor_request
+
+
+def bind_request_params(method, url, params=None, *args, **kwargs):
+    return params
+
+
+def bind_request_interpret_response_params(result, stream):
+    return result.content.decode("utf-8"), result.status_code, result.headers
+
+
+@pytest.fixture(scope="session")
+def generator_proxy(openai_version):
+    class GeneratorProxy(ObjectProxy):
+        def __init__(self, wrapped):
+            super(GeneratorProxy, self).__init__(wrapped)
+
+        def __iter__(self):
+            return self
+
+        # Make this proxy a pass-through to the instrumentation's own proxy by
+        # forwarding attribute get/set calls to the wrapped object.
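+        # (The instrumentation stashes attributes such as _nr_response_headers
+        # on the stream it wraps; without this forwarding, those attributes,
+        # read in __next__ below, would not be reachable from this proxy.)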
+        def __getattr__(self, attr):
+            return self.__wrapped__.__getattr__(attr)
+
+        def __setattr__(self, attr, value):
+            return self.__wrapped__.__setattr__(attr, value)
+
+        def __next__(self):
+            transaction = current_transaction()
+            if not transaction:
+                return self.__wrapped__.__next__()
+
+            return_val = self.__wrapped__.__next__()
+            if return_val:
+                # Append the streamed chunk to the audit log entry for the most recent prompt.
+                prompt = list(OPENAI_AUDIT_LOG_CONTENTS.keys())[-1]
+                if openai_version < (1, 0):
+                    headers = dict(
+                        filter(
+                            lambda k: k[0].lower() in RECORDED_HEADERS
+                            or k[0].lower().startswith("openai")
+                            or k[0].lower().startswith("x-ratelimit"),
+                            return_val._nr_response_headers.items(),
+                        )
+                    )
+                    OPENAI_AUDIT_LOG_CONTENTS[prompt][0] = headers
+                    OPENAI_AUDIT_LOG_CONTENTS[prompt][2].append(return_val.to_dict_recursive())
+                else:
+                    if not getattr(return_val, "data", "").startswith("[DONE]"):
+                        OPENAI_AUDIT_LOG_CONTENTS[prompt][2].append(return_val.json())
+            return return_val
+
+        def close(self):
+            return super(GeneratorProxy, self).close()
+
+    return GeneratorProxy
+
+
+@pytest.fixture(scope="session")
+def wrap_engine_api_resource_create(generator_proxy):
+    def _wrap_engine_api_resource_create(wrapped, instance, args, kwargs):
+        transaction = current_transaction()
+
+        if not transaction:
+            return wrapped(*args, **kwargs)
+
+        bound_args = bind_args(wrapped, args, kwargs)
+        stream = bound_args["params"].get("stream", False)
+
+        return_val = wrapped(*args, **kwargs)
+
+        if stream:
+            return generator_proxy(return_val)
+        else:
+            return return_val
+
+    return _wrap_engine_api_resource_create
+
+
+@pytest.fixture(scope="session")
+def wrap_stream_iter_events(generator_proxy):
+    def _wrap_stream_iter_events(wrapped, instance, args, kwargs):
+        transaction = current_transaction()
+
+        if not transaction:
+            return wrapped(*args, **kwargs)
+
+        return_val = wrapped(*args, **kwargs)
+        proxied_return_val = generator_proxy(return_val)
+        return proxied_return_val
+
+    return _wrap_stream_iter_events
diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py
new file mode 100644
index 000000000..5d7cf11d8
--- /dev/null
+++ b/tests/mlmodel_openai/test_chat_completion.py
@@ -0,0 +1,436 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
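+#
+# These tests exercise the agent's ChatCompletion instrumentation for openai
+# v0 against the mocked backend wired up by the autouse openai_server fixture
+# in conftest.py, so no real OpenAI calls are made by default.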
+ +import openai +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + disabled_ai_monitoring_streaming_settings, + events_sans_content, + events_sans_llm_metadata, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +chat_completion_recorded_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "response.model": "gpt-3.5-turbo-0613", + "response.organization": "new-relic-nkmd8b", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "stop", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 200, + "response.headers.ratelimitLimitTokens": 40000, + "response.headers.ratelimitResetTokens": "90ms", + "response.headers.ratelimitResetRequests": "7m12s", + "response.headers.ratelimitRemainingTokens": 39940, + "response.headers.ratelimitRemainingRequests": 199, + "vendor": "openai", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-0", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-1", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-2", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "span_id": None, + "trace_id": "trace-id", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "is_response": True, + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() 
+@validate_custom_events(chat_completion_recorded_events) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion:test_openai_chat_completion_sync_with_llm_metadata", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion:test_openai_chat_completion_sync_no_content", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_no_content(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion:test_openai_chat_completion_sync_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_token_count(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion:test_openai_chat_completion_sync_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_no_llm_metadata(set_trace_info): + set_trace_info() + + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() 
+@disabled_ai_monitoring_streaming_settings +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion:test_openai_chat_completion_sync_stream_monitoring_disabled", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_stream_monitoring_disabled(set_trace_info): + set_trace_info() + + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_sync_outside_txn(): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_sync_ai_monitoring_disabled(): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion:test_openai_chat_completion_async_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_no_llm_metadata(loop, set_trace_info): + set_trace_info() + + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_streaming_settings +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion:test_openai_chat_completion_async_stream_monitoring_disabled", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_stream_monitoring_disabled(loop, set_trace_info): + set_trace_info() + + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion:test_openai_chat_completion_async_with_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + 
add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion:test_openai_chat_completion_async_no_content", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_no_content(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion:test_openai_chat_completion_async_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_token_count(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_async_outside_transaction(loop): + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_async_ai_monitoring_disabled(loop): + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_no_usage_data(set_trace_info): + # Only testing that there are events, and there was no exception raised + set_trace_info() + + openai.ChatCompletion.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +# One summary event, one system message, one user message, and one response 
message from the assistant +@validate_custom_event_count(count=4) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_no_usage_data(loop, set_trace_info): + # Only testing that there are events, and there was no exception raised + set_trace_info() + + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +def test_openai_chat_completion_functions_marked_as_wrapped_for_sdk_compatibility(): + assert openai.ChatCompletion._nr_wrapped + assert openai.util.convert_to_openai_object._nr_wrapped diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py new file mode 100644 index 000000000..e1fe99329 --- /dev/null +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -0,0 +1,698 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +expected_events_on_no_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 2, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + 
"trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +# No model provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + "test_chat_completion_error:test_chat_completion_invalid_request_error_no_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + "test_chat_completion_error:test_chat_completion_invalid_request_error_no_model_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + ) + + +expected_events_on_invalid_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { 
+ "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_error:test_chat_completion_invalid_request_error_invalid_model_with_token_count", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + + openai.ChatCompletion.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + + +# Invalid model provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_error:test_chat_completion_invalid_request_error_invalid_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + openai.ChatCompletion.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + + +expected_events_on_auth_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 2, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +# No api_key provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) 
+@validate_span_events(
+    exact_agents={
+        "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.",
+    }
+)
+@validate_transaction_metrics(
+    "test_chat_completion_error:test_chat_completion_authentication_error",
+    scoped_metrics=[("Llm/completion/OpenAI/create", 1)],
+    rollup_metrics=[("Llm/completion/OpenAI/create", 1)],
+    background_task=True,
+)
+@validate_custom_events(expected_events_on_auth_error)
+@validate_custom_event_count(count=3)
+@background_task()
+def test_chat_completion_authentication_error(monkeypatch, set_trace_info):
+    with pytest.raises(openai.error.AuthenticationError):
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        monkeypatch.setattr(openai, "api_key", None)  # openai.api_key = None
+        openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=_test_openai_chat_completion_messages,
+            temperature=0.7,
+            max_tokens=100,
+        )
+
+
+expected_events_on_wrong_api_key_error = [
+    (
+        {"type": "LlmChatCompletionSummary"},
+        {
+            "id": None,  # UUID that varies with each run
+            "span_id": None,
+            "trace_id": "trace-id",
+            "duration": None,  # Response time varies each test run
+            "request.model": "gpt-3.5-turbo",
+            "request.temperature": 0.7,
+            "request.max_tokens": 100,
+            "response.number_of_messages": 1,
+            "vendor": "openai",
+            "ingest_source": "Python",
+            "error": True,
+        },
+    ),
+    (
+        {"type": "LlmChatCompletionMessage"},
+        {
+            "id": None,
+            "span_id": None,
+            "trace_id": "trace-id",
+            "content": "Invalid API key.",
+            "role": "user",
+            "completion_id": None,
+            "sequence": 0,
+            "vendor": "openai",
+            "ingest_source": "Python",
+        },
+    ),
+]
+
+
+# Wrong api_key provided
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(openai.error.AuthenticationError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "http.statusCode": 401,
+        },
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "Incorrect API key provided: invalid. 
You can find your API key at https://platform.openai.com/account/api-keys.",
+    }
+)
+@validate_transaction_metrics(
+    "test_chat_completion_error:test_chat_completion_wrong_api_key_error",
+    scoped_metrics=[("Llm/completion/OpenAI/create", 1)],
+    rollup_metrics=[("Llm/completion/OpenAI/create", 1)],
+    background_task=True,
+)
+@validate_custom_events(expected_events_on_wrong_api_key_error)
+@validate_custom_event_count(count=2)
+@background_task()
+def test_chat_completion_wrong_api_key_error(monkeypatch, set_trace_info):
+    with pytest.raises(openai.error.AuthenticationError):
+        set_trace_info()
+        monkeypatch.setattr(openai, "api_key", "DEADBEEF")  # openai.api_key = "DEADBEEF"
+        openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=({"role": "user", "content": "Invalid API key."},),
+            temperature=0.7,
+            max_tokens=100,
+        )
+
+
+# Async tests:
+# No model provided
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(openai.InvalidRequestError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "error.param": "engine",
+        },
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "Must provide an 'engine' or 'model' parameter to create a <class 'openai.api_resources.chat_completion.ChatCompletion'>",
+    }
+)
+@validate_transaction_metrics(
+    "test_chat_completion_error:test_chat_completion_invalid_request_error_no_model_async",
+    scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    background_task=True,
+)
+@validate_custom_events(expected_events_on_no_model_error)
+@validate_custom_event_count(count=3)
+@background_task()
+def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info):
+    with pytest.raises(openai.InvalidRequestError):
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        loop.run_until_complete(
+            openai.ChatCompletion.acreate(
+                # no model provided,
+                messages=_test_openai_chat_completion_messages,
+                temperature=0.7,
+                max_tokens=100,
+            )
+        )
+
+
+@dt_enabled
+@reset_core_stats_engine()
+@disabled_ai_monitoring_record_content_settings
+@validate_error_trace_attributes(
+    callable_name(openai.InvalidRequestError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "error.param": "engine",
+        },
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "Must provide an 'engine' or 'model' parameter to create a <class 'openai.api_resources.chat_completion.ChatCompletion'>",
+    }
+)
+@validate_transaction_metrics(
+    "test_chat_completion_error:test_chat_completion_invalid_request_error_no_model_async_no_content",
+    scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    background_task=True,
+)
+@validate_custom_events(events_sans_content(expected_events_on_no_model_error))
+@validate_custom_event_count(count=3)
+@background_task()
+def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, set_trace_info):
+    with pytest.raises(openai.InvalidRequestError):
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        loop.run_until_complete(
+            openai.ChatCompletion.acreate(
+                # no model provided,
+                messages=_test_openai_chat_completion_messages,
+                temperature=0.7,
+                max_tokens=100,
+            )
+        )
+
+
+@dt_enabled
+@reset_core_stats_engine()
+@override_llm_token_callback_settings(llm_token_count_callback)
+@validate_error_trace_attributes(
+    callable_name(openai.InvalidRequestError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "error.code": "model_not_found",
+            "http.statusCode": 
404,
+        },
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "The model `does-not-exist` does not exist",
+    }
+)
+@validate_transaction_metrics(
+    "test_chat_completion_error:test_chat_completion_invalid_request_error_invalid_model_with_token_count_async",
+    scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    background_task=True,
+)
+@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error))
+@validate_custom_event_count(count=2)
+@background_task()
+def test_chat_completion_invalid_request_error_invalid_model_with_token_count_async(loop, set_trace_info):
+    with pytest.raises(openai.InvalidRequestError):
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        loop.run_until_complete(
+            openai.ChatCompletion.acreate(
+                model="does-not-exist",
+                messages=({"role": "user", "content": "Model does not exist."},),
+                temperature=0.7,
+                max_tokens=100,
+            )
+        )
+
+
+# Invalid model provided
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(openai.InvalidRequestError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "error.code": "model_not_found",
+            "http.statusCode": 404,
+        },
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "The model `does-not-exist` does not exist",
+    }
+)
+@validate_transaction_metrics(
+    "test_chat_completion_error:test_chat_completion_invalid_request_error_invalid_model_async",
+    scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)],
+    background_task=True,
+)
+@validate_custom_events(expected_events_on_invalid_model_error)
+@validate_custom_event_count(count=2)
+@background_task()
+def test_chat_completion_invalid_request_error_invalid_model_async(loop, set_trace_info):
+    with pytest.raises(openai.InvalidRequestError):
+        set_trace_info()
+        add_custom_attribute("llm.conversation_id", "my-awesome-id")
+        loop.run_until_complete(
+            openai.ChatCompletion.acreate(
+                model="does-not-exist",
+                messages=({"role": "user", "content": "Model does not exist."},),
+                temperature=0.7,
+                max_tokens=100,
+            )
+        )
+
+
+# No api_key provided
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(openai.error.AuthenticationError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {},
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error:test_chat_completion_authentication_error_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_auth_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_authentication_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +# Wrong api_key provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: invalid. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error:test_chat_completion_wrong_api_key_error_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) + ) diff --git a/tests/mlmodel_openai/test_chat_completion_error_v1.py b/tests/mlmodel_openai/test_chat_completion_error_v1.py new file mode 100644 index 000000000..735664cac --- /dev/null +++ b/tests/mlmodel_openai/test_chat_completion_error_v1.py @@ -0,0 +1,545 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
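The v1 tests in this file drive requests through `sync_openai_client` and `async_openai_client` fixtures that live in the suite's conftest rather than in this diff. A minimal sketch of what such fixtures could look like, assuming the openai>=1.0 client API; the API key and base_url values below are placeholder assumptions, since the real fixtures would point at the suite's mock OpenAI server:

import openai
import pytest


@pytest.fixture
def sync_openai_client():
    # Synchronous v1 client; base_url keeps every request on a local mock
    # server so the tests never reach the real OpenAI API (assumed setup).
    return openai.OpenAI(api_key="NOT-A-REAL-KEY", base_url="http://localhost:8000")


@pytest.fixture
def async_openai_client():
    # Async v1 client consumed by the *_async tests through the loop fixture.
    return openai.AsyncOpenAI(api_key="NOT-A-REAL-KEY", base_url="http://localhost:8000")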
+ +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +expected_events_on_no_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 2, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + 
"user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_async", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info, async_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_async_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, set_trace_info, async_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +expected_events_on_invalid_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": 
None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_with_token_count", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_async", + 
scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_async(loop, set_trace_info, async_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_with_token_count_async", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count_async( + loop, set_trace_info, async_openai_client +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + ) + + +expected_events_on_wrong_api_key_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid API key.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_wrong_api_key_error", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error(monkeypatch, set_trace_info, sync_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(sync_openai_client, "api_key", "DEADBEEF") + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_wrong_api_key_error_async", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_info, async_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(async_openai_client, "api_key", "DEADBEEF") + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) + ) diff --git a/tests/mlmodel_openai/test_chat_completion_stream.py b/tests/mlmodel_openai/test_chat_completion_stream.py new file mode 100644 index 000000000..18cee59ab --- /dev/null +++ b/tests/mlmodel_openai/test_chat_completion_stream.py @@ -0,0 +1,493 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
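The streaming tests in the next file exercise token counting through the `override_llm_token_callback_settings(llm_token_count_callback)` test fixture. Outside the test harness, the equivalent wiring goes through the public `set_llm_token_count_callback` API added by this change; a minimal usage sketch, assuming tiktoken is installed (the tiktoken-based counting is an illustration, not part of this diff):

import newrelic.agent
import tiktoken


def count_tokens(model, content):
    # The callback must return a non-negative int, or None when the
    # token count cannot be determined for the given model.
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        return None
    return len(encoding.encode(content))


# Register the callback once at startup; pass None later to unset it.
newrelic.agent.set_llm_token_count_callback(count_tokens)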
+ +import openai +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + disabled_ai_monitoring_streaming_settings, + events_sans_content, + events_sans_llm_metadata, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute + +disabled_custom_insights_settings = {"custom_insights_events.enabled": False} + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +chat_completion_recorded_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "response.model": "gpt-3.5-turbo-0613", + "response.organization": "new-relic-nkmd8b", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "stop", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 200, + "response.headers.ratelimitLimitTokens": 40000, + "response.headers.ratelimitResetTokens": "90ms", + "response.headers.ratelimitResetRequests": "7m12s", + "response.headers.ratelimitRemainingTokens": 39940, + "response.headers.ratelimitRemainingRequests": 199, + "vendor": "openai", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-0", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-1", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-2", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "49dbbffbd3c3f4612aa48def69059ccd", + "span_id": None, + "trace_id": "trace-id", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "is_response": True, 
+ "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream:test_openai_chat_completion_sync_with_llm_metadata", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream:test_openai_chat_completion_sync_no_content", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_no_content(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream:test_openai_chat_completion_sync_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_token_count(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream:test_openai_chat_completion_sync_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 
1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_no_llm_metadata(set_trace_info): + set_trace_info() + + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@disabled_ai_monitoring_streaming_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@validate_transaction_metrics( + "test_chat_completion_stream:test_openai_chat_completion_sync_ai_monitoring_streaming_disabled", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_ai_monitoring_streaming_disabled(): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_sync_outside_txn(): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_sync_ai_monitoring_disabled(): + openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream:test_openai_chat_completion_async_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_no_llm_metadata(loop, set_trace_info): + set_trace_info() + + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream:test_openai_chat_completion_async_with_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + 
async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream:test_openai_chat_completion_async_no_content", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_no_content(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream:test_openai_chat_completion_async_with_token_count", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_token_count(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@disabled_ai_monitoring_streaming_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@validate_transaction_metrics( + name="test_chat_completion_stream:test_openai_chat_completion_async_ai_monitoring_streaming_disabled", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_ai_monitoring_streaming_disabled(loop): + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_async_outside_transaction(loop): + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + 
assert resp + + loop.run_until_complete(consumer()) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_async_ai_monitoring_disabled(loop): + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +def test_openai_chat_completion_functions_marked_as_wrapped_for_sdk_compatibility(): + assert openai.ChatCompletion._nr_wrapped + assert openai.util.convert_to_openai_object._nr_wrapped diff --git a/tests/mlmodel_openai/test_chat_completion_stream_error.py b/tests/mlmodel_openai/test_chat_completion_stream_error.py new file mode 100644 index 000000000..01270491c --- /dev/null +++ b/tests/mlmodel_openai/test_chat_completion_stream_error.py @@ -0,0 +1,835 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +expected_events_on_no_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 2, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { 
+ "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_no_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_no_model_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = openai.ChatCompletion.create( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +expected_events_on_invalid_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() 
+@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_invalid_model_with_token_count", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = openai.ChatCompletion.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_invalid_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = openai.ChatCompletion.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +expected_events_on_auth_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 2, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, 
+ "sequence": 1, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_authentication_error", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_auth_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_authentication_error(monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +expected_events_on_wrong_api_key_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid API key.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_wrong_api_key_error", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error(monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", "DEADBEEF") + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_no_model_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + openai.ChatCompletion.acreate( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_no_model_async_no_content", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + openai.ChatCompletion.acreate( + # no model provided, + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + 
"error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_invalid_model_with_token_count_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count_async(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_invalid_request_error_invalid_model_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_async(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_authentication_error_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_auth_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_authentication_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_wrong_api_key_error_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", "DEADBEEF") + loop.run_until_complete( + openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + ) + + +expected_events_stream_parsing_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "response.organization": "new-relic-nkmd8b", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "content": "Stream parsing error.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.APIError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 200, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": 'HTTP code 200 from API ({"id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", "object": "chat.completion.chunk", "created": 1706565311, "model": "gpt-3.5-turbo-0613", "system_fingerprint": null, "choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": null, "finish_reason": null}]}data: {"id": 
"chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", "object": "chat.completion.chunk", "created": 1706565311, "model": "gpt-3.5-turbo-0613", "system_fingerprint": null, "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": null, "finish_reason": null}]})', + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_stream_parsing_error_async", + scoped_metrics=[("Llm/completion/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/completion/OpenAI/acreate", 1)], + background_task=True, +) +@validate_custom_events(expected_events_stream_parsing_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_stream_parsing_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.APIError): + set_trace_info() + + async def consumer(): + generator = await openai.ChatCompletion.acreate( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Stream parsing error."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.APIError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 200, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": 'HTTP code 200 from API ({"id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", "object": "chat.completion.chunk", "created": 1706565311, "model": "gpt-3.5-turbo-0613", "system_fingerprint": null, "choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": null, "finish_reason": null}]}data: {"id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", "object": "chat.completion.chunk", "created": 1706565311, "model": "gpt-3.5-turbo-0613", "system_fingerprint": null, "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": null, "finish_reason": null}]})', + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error:test_chat_completion_stream_parsing_error", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_stream_parsing_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_stream_parsing_error(monkeypatch, set_trace_info): + with pytest.raises(openai.error.APIError): + set_trace_info() + + generator = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Stream parsing error."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp diff --git a/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py b/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py new file mode 100644 index 000000000..e9bba66b4 --- /dev/null +++ b/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py @@ -0,0 +1,585 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +expected_events_on_no_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 2, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_no_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = sync_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True + ) + for resp in generator: + assert resp + + +@dt_enabled 
+@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_no_model_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = sync_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_no_model_async", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info, async_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_no_model_async_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, set_trace_info, async_openai_client): + with pytest.raises(TypeError): + set_trace_info() + 
add_custom_attribute("llm.conversation_id", "my-awesome-id") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +expected_events_on_invalid_model_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "span_id": None, + "trace_id": "trace-id", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_invalid_model", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + generator = sync_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_invalid_model_with_token_count", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + + generator = sync_openai_client.chat.completions.create( + 
model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_invalid_model_async_with_token_count", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_async_with_token_count( + loop, set_trace_info, async_openai_client +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_invalid_request_error_invalid_model_async", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_async(loop, set_trace_info, async_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +expected_events_on_wrong_api_key_error = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "span_id": None, + "trace_id": "trace-id", + "content": 
"Invalid API key.", + "role": "user", + "completion_id": None, + "sequence": 0, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_wrong_api_key_error", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error(monkeypatch, set_trace_info, sync_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(sync_openai_client, "api_key", "DEADBEEF") + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_stream_error_v1:test_chat_completion_wrong_api_key_error_async", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_info, async_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(async_openai_client, "api_key", "DEADBEEF") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) diff --git a/tests/mlmodel_openai/test_chat_completion_stream_v1.py b/tests/mlmodel_openai/test_chat_completion_stream_v1.py new file mode 100644 index 000000000..01b2a09e6 --- /dev/null +++ b/tests/mlmodel_openai/test_chat_completion_stream_v1.py @@ -0,0 +1,498 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + disabled_ai_monitoring_streaming_settings, + events_sans_content, + events_sans_llm_metadata, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +chat_completion_recorded_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "response.model": "gpt-3.5-turbo-0613", + "response.organization": "new-relic-nkmd8b", + # Usage tokens aren't available when streaming. 
+ "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "stop", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 200, + "response.headers.ratelimitLimitTokens": 40000, + "response.headers.ratelimitResetTokens": "180ms", + "response.headers.ratelimitResetRequests": "11m32.334s", + "response.headers.ratelimitRemainingTokens": 39880, + "response.headers.ratelimitRemainingRequests": 198, + "vendor": "openai", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-0", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-1", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-2", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "span_id": None, + "trace_id": "trace-id", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "is_response": True, + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +# One summary event, one system message, one user message, and one response message from the assistant +# @validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_with_llm_metadata", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +# @validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_no_content", + custom_metrics=[ + 
("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_no_content(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream_v1:test_openai_chat_completion_sync_in_txn_with_llm_metadata_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_in_txn_with_llm_metadata_with_token_count(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream_v1:test_openai_chat_completion_sync_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_no_llm_metadata(set_trace_info, sync_openai_client): + set_trace_info() + + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + for resp in generator: + assert resp + + +@disabled_ai_monitoring_streaming_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@validate_transaction_metrics( + "test_chat_completion_stream_v1:test_openai_chat_completion_sync_ai_monitoring_streaming_disabled", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_ai_monitoring_streaming_disabled(sync_openai_client): + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def 
test_openai_chat_completion_sync_outside_txn(sync_openai_client): + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + for resp in generator: + assert resp + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_sync_ai_monitoring_disabled(sync_openai_client): + generator = sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + + for resp in generator: + assert resp + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream_v1:test_openai_chat_completion_async_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_no_llm_metadata(loop, set_trace_info, async_openai_client): + set_trace_info() + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream_v1:test_openai_chat_completion_async_with_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_stream_v1:test_openai_chat_completion_async_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_no_content(loop, set_trace_info, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + async def 
consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +# @validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_stream_v1:test_openai_chat_completion_async_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_token_count(set_trace_info, loop, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@disabled_ai_monitoring_streaming_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@validate_transaction_metrics( + "test_chat_completion_stream_v1:test_openai_chat_completion_async_ai_monitoring_streaming_disabled", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_ai_monitoring_streaming_disabled(loop, async_openai_client): + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_async_outside_transaction(loop, async_openai_client): + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_async_disabled_ai_monitoring_settings(loop, async_openai_client): + async def consumer(): + generator = await async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=_test_openai_chat_completion_messages, + temperature=0.7, + max_tokens=100, + stream=True, + ) + async for resp in generator: + assert resp + + loop.run_until_complete(consumer()) diff --git a/tests/mlmodel_openai/test_chat_completion_v1.py b/tests/mlmodel_openai/test_chat_completion_v1.py new file mode 100644 index 000000000..1ad95c274 --- /dev/null +++ 
b/tests/mlmodel_openai/test_chat_completion_v1.py @@ -0,0 +1,435 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + disabled_ai_monitoring_streaming_settings, + events_sans_content, + events_sans_llm_metadata, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute + +_test_openai_chat_completion_messages = ( + {"role": "system", "content": "You are a scientist."}, + {"role": "user", "content": "What is 212 degrees Fahrenheit converted to Celsius?"}, +) + +chat_completion_recorded_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "duration": None, # Response time varies each test run + "request.model": "gpt-3.5-turbo", + "response.model": "gpt-3.5-turbo-0613", + "response.organization": "new-relic-nkmd8b", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "stop", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 200, + "response.headers.ratelimitLimitTokens": 40000, + "response.headers.ratelimitResetTokens": "180ms", + "response.headers.ratelimitResetRequests": "11m32.334s", + "response.headers.ratelimitRemainingTokens": 39880, + "response.headers.ratelimitRemainingRequests": 198, + "response.headers.ratelimitLimitTokensUsageBased": 40000, + "response.headers.ratelimitResetTokensUsageBased": "180ms", + "response.headers.ratelimitRemainingTokensUsageBased": 39880, + "vendor": "openai", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-0", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-1", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": 
"f8d0f53b6881c5c0a3698e55f8f410ac", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-2", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "span_id": None, + "trace_id": "trace-id", + "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "gpt-3.5-turbo-0613", + "vendor": "openai", + "is_response": True, + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_v1:test_openai_chat_completion_sync_with_llm_metadata", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_v1:test_openai_chat_completion_sync_no_content", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_no_content(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_v1:test_openai_chat_completion_sync_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_token_count(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + 
add_custom_attribute("llm.foo", "bar") + + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_sync_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_no_llm_metadata(set_trace_info, sync_openai_client): + set_trace_info() + + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_streaming_settings +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_sync_stream_monitoring_disabled", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_sync_stream_monitoring_disabled(set_trace_info, sync_openai_client): + set_trace_info() + + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_sync_outside_txn(sync_openai_client): + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_sync_ai_monitoring_disabled(sync_openai_client): + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_async_no_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_no_llm_metadata(loop, set_trace_info, async_openai_client): + set_trace_info() + + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_streaming_settings +@validate_custom_events(events_sans_llm_metadata(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + 
"test_chat_completion_v1:test_openai_chat_completion_async_stream_monitoring_disabled", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@background_task() +def test_openai_chat_completion_async_stream_monitoring_disabled(loop, set_trace_info, async_openai_client): + set_trace_info() + + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_async_with_llm_metadata", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(chat_completion_recorded_events)) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_async_with_llm_metadata_no_content", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_llm_metadata_no_content(loop, set_trace_info, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(chat_completion_recorded_events)) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_v1:test_openai_chat_completion_async_in_txn_with_token_count", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_in_txn_with_token_count(set_trace_info, loop, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + + loop.run_until_complete( + async_openai_client.chat.completions.create( + 
model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_chat_completion_async_outside_transaction(loop, async_openai_client): + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_chat_completion_async_ai_monitoring_disabled(loop, async_openai_client): + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@reset_core_stats_engine() +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=3) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_no_usage_data(set_trace_info, sync_openai_client, loop): + # Only testing that there are events, and there was no exception raised + set_trace_info() + sync_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=({"role": "user", "content": "No usage data"},), temperature=0.7, max_tokens=100 + ) + + +@reset_core_stats_engine() +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=3) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_no_usage_data(set_trace_info, async_openai_client, loop): + # Only testing that there are events, and there was no exception raised + set_trace_info() + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "No usage data"},), + temperature=0.7, + max_tokens=100, + ) + ) diff --git a/tests/mlmodel_openai/test_embeddings.py b/tests/mlmodel_openai/test_embeddings.py new file mode 100644 index 000000000..5532d475d --- /dev/null +++ b/tests/mlmodel_openai/test_embeddings.py @@ -0,0 +1,250 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +from testing_support.fixtures import ( # override_application_settings, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.transaction import add_custom_attribute + +embedding_recorded_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "This is an embedding test.", + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "duration": None, # Response time varies each test run + "response.model": "text-embedding-ada-002-v2", + "request.model": "text-embedding-ada-002", + "request_id": "c70828b2293314366a76a2b1dcb20688", + "response.organization": "new-relic-nkmd8b", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 200, + "response.headers.ratelimitLimitTokens": 150000, + "response.headers.ratelimitResetTokens": "2ms", + "response.headers.ratelimitResetRequests": "19m45.394s", + "response.headers.ratelimitRemainingTokens": 149994, + "response.headers.ratelimitRemainingRequests": 197, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings:test_openai_embedding_sync", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + openai.Embedding.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings:test_openai_embedding_sync_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync_no_content(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + openai.Embedding.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) 
+@validate_custom_events(add_token_count_to_events(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings:test_openai_embedding_sync_with_token_count", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync_with_token_count(set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + openai.Embedding.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_embedding_sync_outside_txn(): + openai.Embedding.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_embedding_sync_disabled_ai_monitoring_events(set_trace_info): + set_trace_info() + openai.Embedding.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings:test_openai_embedding_async", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + loop.run_until_complete( + openai.Embedding.acreate(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings:test_openai_embedding_async_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async_no_content(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + loop.run_until_complete( + openai.Embedding.acreate(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings:test_openai_embedding_async_with_token_count", + 
scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async_with_token_count(loop, set_trace_info): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + loop.run_until_complete( + openai.Embedding.acreate(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_embedding_async_outside_transaction(loop): + loop.run_until_complete( + openai.Embedding.acreate(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_embedding_async_disabled_ai_monitoring_events(loop): + loop.run_until_complete( + openai.Embedding.acreate(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +def test_openai_embedding_functions_marked_as_wrapped_for_sdk_compatibility(): + assert openai.Embedding._nr_wrapped + assert openai.util.convert_to_openai_object._nr_wrapped diff --git a/tests/mlmodel_openai/test_embeddings_error.py b/tests/mlmodel_openai/test_embeddings_error.py new file mode 100644 index 000000000..6dd465bc8 --- /dev/null +++ b/tests/mlmodel_openai/test_embeddings_error.py @@ -0,0 +1,565 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.object_names import callable_name + +embedding_recorded_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "This is an embedding test with no model.", + "duration": None, # Response time varies each test run + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +# No model provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a <class 'openai.api_resources.embedding.Embedding'>", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_no_model", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + openai.Embedding.create( + input="This is an embedding test with no model.", + # no model provided + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a <class 'openai.api_resources.embedding.Embedding'>", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_no_model_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(events_sans_content(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_no_content(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + openai.Embedding.create( + input="This is an embedding test with no model.", + # no model provided + ) + + +invalid_model_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID
that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "Model does not exist.", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_invalid_model_with_token_count", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_with_token_count(set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + openai.Embedding.create(input="Model does not exist.", model="does-not-exist") + + +# Invalid model provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_invalid_model", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model(set_trace_info): + set_trace_info() + with pytest.raises(openai.InvalidRequestError): + openai.Embedding.create(input="Model does not exist.", model="does-not-exist") + + +embedding_auth_error_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "Invalid API key.", + "duration": None, # Response time varies each test run + "request.model": "text-embedding-ada-002", + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +# No api_key provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface.
See https://platform.openai.com/account/api-keys for details.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_authentication_error", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_auth_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_authentication_error(monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + openai.Embedding.create(input="Invalid API key.", model="text-embedding-ada-002") + + +embedding_invalid_key_error_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "Embedded: Invalid API key.", + "duration": None, # Response time varies each test run + "request.model": "text-embedding-ada-002", + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +# Wrong api_key provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_wrong_api_key_error", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error(monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + openai.Embedding.create(input="Embedded: Invalid API key.", model="text-embedding-ada-002") + + +# Async tests: + + +# No model provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a <class 'openai.api_resources.embedding.Embedding'>", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_no_model_async", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_async(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + loop.run_until_complete( + openai.Embedding.acreate( + input="This is an embedding test with no
model.", + # No model provided + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.param": "engine", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Must provide an 'engine' or 'model' parameter to create a ", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_no_model_async_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(events_sans_content(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_async_no_content(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + loop.run_until_complete( + openai.Embedding.acreate( + input="This is an embedding test with no model.", + # No model provided + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_invalid_model_with_token_count_async", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_with_token_count_async(set_trace_info, loop): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + loop.run_until_complete(openai.Embedding.acreate(input="Model does not exist.", model="does-not-exist")) + + +# Invalid model provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.InvalidRequestError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_invalid_request_error_invalid_model_async", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async(loop, set_trace_info): + with pytest.raises(openai.InvalidRequestError): + set_trace_info() + loop.run_until_complete(openai.Embedding.acreate(input="Model does not exist.", model="does-not-exist")) + + +# No api_key provided +@dt_enabled 
+@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_authentication_error_async", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_auth_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_authentication_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", None) # openai.api_key = None + loop.run_until_complete(openai.Embedding.acreate(input="Invalid API key.", model="text-embedding-ada-002")) + + +# Wrong api_key provided +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.error.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error:test_embeddings_wrong_api_key_error_async", + scoped_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/acreate", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error_async(loop, monkeypatch, set_trace_info): + with pytest.raises(openai.error.AuthenticationError): + set_trace_info() + monkeypatch.setattr(openai, "api_key", "DEADBEEF") # openai.api_key = "DEADBEEF" + loop.run_until_complete( + openai.Embedding.acreate(input="Embedded: Invalid API key.", model="text-embedding-ada-002") + ) diff --git a/tests/mlmodel_openai/test_embeddings_error_v1.py b/tests/mlmodel_openai/test_embeddings_error_v1.py new file mode 100644 index 000000000..06a38cf46 --- /dev/null +++ b/tests/mlmodel_openai/test_embeddings_error_v1.py @@ -0,0 +1,471 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
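+
+# Error-path tests against the OpenAI v1 client API. Here a missing model
+# surfaces as a TypeError from the SDK method signature itself, and
+# server-side failures raise openai.NotFoundError / openai.AuthenticationError
+# rather than the v0 InvalidRequestError hierarchy.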
+ +import sys + +import openai +import pytest +from testing_support.fixtures import ( + dt_enabled, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.object_names import callable_name + +# Sync tests: +no_model_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "This is an embedding test with no model.", + "duration": None, # Response time varies each test run + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "create() missing 1 required keyword-only argument: 'model'" + if sys.version_info < (3, 10) + else "Embeddings.create() missing 1 required keyword-only argument: 'model'", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(no_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + sync_openai_client.embeddings.create(input="This is an embedding test with no model.") # no model provided + + +@dt_enabled +@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "create() missing 1 required keyword-only argument: 'model'" + if sys.version_info < (3, 10) + else "Embeddings.create() missing 1 required keyword-only argument: 'model'", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(events_sans_content(no_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_no_content(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + sync_openai_client.embeddings.create(input="This is an embedding test 
with no model.") # no model provided + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "create() missing 1 required keyword-only argument: 'model'" + if sys.version_info < (3, 10) + else "AsyncEmbeddings.create() missing 1 required keyword-only argument: 'model'", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model_async", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(no_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_async(set_trace_info, async_openai_client, loop): + with pytest.raises(TypeError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.create(input="This is an embedding test with no model.") + ) # no model provided + + +invalid_model_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "Model does not exist.", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_with_token_count", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_with_token_count(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + sync_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist") + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) 
+@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + sync_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist") + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async(set_trace_info, async_openai_client, loop): + with pytest.raises(openai.NotFoundError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist") + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(events_sans_content(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async_no_content(set_trace_info, async_openai_client, loop): + with pytest.raises(openai.NotFoundError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist") + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async_with_token_count", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(invalid_model_events)) 
+@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async_with_token_count( + set_trace_info, async_openai_client, loop +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + + loop.run_until_complete( + async_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist") + ) + + +embedding_invalid_key_error_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "Invalid API key.", + "duration": None, # Response time varies each test run + "request.model": "text-embedding-ada-002", + "vendor": "openai", + "ingest_source": "Python", + "error": True, + }, + ), +] + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_wrong_api_key_error", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error(set_trace_info, monkeypatch, sync_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(sync_openai_client, "api_key", "DEADBEEF") + sync_openai_client.embeddings.create(input="Invalid API key.", model="text-embedding-ada-002") + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_wrong_api_key_error_async", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error_async(set_trace_info, monkeypatch, async_openai_client, loop): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(async_openai_client, "api_key", "DEADBEEF") + loop.run_until_complete( + async_openai_client.embeddings.create(input="Invalid API key.", model="text-embedding-ada-002") + ) diff --git a/tests/mlmodel_openai/test_embeddings_stream_v1.py b/tests/mlmodel_openai/test_embeddings_stream_v1.py new file mode 100644 index 000000000..3ea8be4aa --- /dev/null +++ b/tests/mlmodel_openai/test_embeddings_stream_v1.py @@ -0,0 +1,74 @@ +# Copyright 2010 New Relic, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from conftest import get_openai_version # pylint: disable=E0611 +from testing_support.fixtures import ( + reset_core_stats_engine, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import set_trace_info # noqa: F401 + +from newrelic.api.background_task import background_task + +OPENAI_VERSION = get_openai_version() +SKIP_IF_NO_OPENAI_EMBEDDING_STREAMING_SUPPORT = pytest.mark.skipif( + OPENAI_VERSION < (1, 8), reason="OpenAI does not support embedding streaming until v1.8" +) + + +@SKIP_IF_NO_OPENAI_EMBEDDING_STREAMING_SUPPORT +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_embedding_sync(set_trace_info, sync_openai_stream_client): + """ + Does not instrument streamed embeddings. + """ + set_trace_info() + with sync_openai_stream_client.embeddings.create( + input="This is an embedding test.", model="text-embedding-ada-002" + ) as response: + for resp in response.iter_lines(): + assert resp + + +@SKIP_IF_NO_OPENAI_EMBEDDING_STREAMING_SUPPORT +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_embedding_async(loop, set_trace_info, async_openai_stream_client): + """ + Does not instrument streamed embeddings. + """ + set_trace_info() + + async def test(): + async with async_openai_stream_client.embeddings.create( + input="This is an embedding test.", model="text-embedding-ada-002" + ) as response: + async for resp in response.iter_lines(): + assert resp + + loop.run_until_complete(test()) + + +@pytest.fixture +def sync_openai_stream_client(sync_openai_client, openai_version): + return sync_openai_client.with_streaming_response + + +@pytest.fixture +def async_openai_stream_client(async_openai_client, openai_version): + return async_openai_client.with_streaming_response diff --git a/tests/mlmodel_openai/test_embeddings_v1.py b/tests/mlmodel_openai/test_embeddings_v1.py new file mode 100644 index 000000000..38777e2c5 --- /dev/null +++ b/tests/mlmodel_openai/test_embeddings_v1.py @@ -0,0 +1,219 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
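+
+# Happy-path tests against the OpenAI v1 client API. Both the sync and async
+# v1 clients expose the embeddings endpoint through a method named create, so
+# the scoped metric is "Llm/embedding/OpenAI/create" in both cases, unlike the
+# v0 acreate variant above.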
+ +import openai +from testing_support.fixtures import ( + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, + validate_custom_event_count, +) +from testing_support.ml_testing_utils import ( # noqa: F401 + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_sans_content, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task + +embedding_recorded_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "span_id": None, + "trace_id": "trace-id", + "input": "This is an embedding test.", + "duration": None, # Response time varies each test run + "response.model": "text-embedding-ada-002-v2", + "request.model": "text-embedding-ada-002", + "request_id": "fef7adee5adcfb03c083961bdce4f6a4", + "response.organization": "foobar-jtbczk", + "response.headers.llmVersion": "2020-10-01", + "response.headers.ratelimitLimitRequests": 200, + "response.headers.ratelimitLimitTokens": 150000, + "response.headers.ratelimitResetTokens": "2ms", + "response.headers.ratelimitResetRequests": "19m5.228s", + "response.headers.ratelimitRemainingTokens": 149993, + "response.headers.ratelimitRemainingRequests": 197, + "vendor": "openai", + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_sync", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync(set_trace_info, sync_openai_client): + set_trace_info() + sync_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_sync_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync_no_content(set_trace_info, sync_openai_client): + set_trace_info() + sync_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_sync_with_token_count", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + 
], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync_with_token_count(set_trace_info, sync_openai_client): + set_trace_info() + sync_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_embedding_sync_outside_txn(sync_openai_client): + sync_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_embedding_sync_ai_monitoring_disabled(sync_openai_client): + sync_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + + +@reset_core_stats_engine() +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_async", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async(loop, set_trace_info, async_openai_client): + set_trace_info() + + loop.run_until_complete( + async_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(events_sans_content(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_async_no_content", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async_no_content(loop, set_trace_info, async_openai_client): + set_trace_info() + + loop.run_until_complete( + async_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_custom_events(add_token_count_to_events(embedding_recorded_events)) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_async_with_token_count", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async_with_token_count(set_trace_info, loop, async_openai_client): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + ) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_openai_embedding_async_outside_transaction(loop, async_openai_client): + loop.run_until_complete( + async_openai_client.embeddings.create(input="This is an embedding test.", 
model="text-embedding-ada-002") + ) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_openai_embedding_async_ai_monitoring_disabled(loop, async_openai_client): + loop.run_until_complete( + async_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") + ) diff --git a/tests/testing_support/external_fixtures.py b/tests/testing_support/external_fixtures.py index de746c38b..52b57d401 100644 --- a/tests/testing_support/external_fixtures.py +++ b/tests/testing_support/external_fixtures.py @@ -30,35 +30,32 @@ def create_incoming_headers(transaction): headers = [] - cross_process_id = '1#2' - path = 'test' + cross_process_id = "1#2" + path = "test" queue_time = 1.0 duration = 2.0 read_length = 1024 - guid = '0123456789012345' + guid = "0123456789012345" record_tt = False - payload = (cross_process_id, path, queue_time, duration, read_length, - guid, record_tt) + payload = (cross_process_id, path, queue_time, duration, read_length, guid, record_tt) app_data = json_encode(payload) value = obfuscate(app_data, encoding_key) - assert isinstance(value, type('')) + assert isinstance(value, type("")) - headers.append(('X-NewRelic-App-Data', value)) + headers.append(("X-NewRelic-App-Data", value)) return headers def validate_synthetics_external_trace_header( - synthetics_header, - synthetics_info_header, - ): - @transient_function_wrapper('newrelic.core.stats_engine', - 'StatsEngine.record_transaction') - def _validate_synthetics_external_trace_header(wrapped, instance, - args, kwargs): + synthetics_header, + synthetics_info_header, +): + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction") + def _validate_synthetics_external_trace_header(wrapped, instance, args, kwargs): def _bind_params(transaction, *args, **kwargs): return transaction @@ -91,31 +88,29 @@ def __init__(self, wrapped): def __getattr__(self, name): return getattr(self.__wrapped__, name, lambda *args, **kwargs: None) - external_headers = ExternalTrace.generate_request_headers( - _Transaction(transaction)) + external_headers = ExternalTrace.generate_request_headers(_Transaction(transaction)) external_headers = {header[0]: header[1] for header in external_headers} if synthetics_header: - assert synthetics_header == external_headers["X-NewRelic-Synthetics"], ( - 'synthetics_header=%r, external_headers=%r' % ( - synthetics_header, external_headers)) + assert ( + synthetics_header == external_headers["X-NewRelic-Synthetics"] + ), "synthetics_header=%r, external_headers=%r" % (synthetics_header, external_headers) else: assert "X-NewRelic-Synthetics" not in external_headers if synthetics_info_header: - assert synthetics_info_header == external_headers["X-NewRelic-Synthetics-Info"], ( - 'synthetics_info_header=%r, external_headers=%r' % ( - synthetics_info_header, external_headers)) + assert ( + synthetics_info_header == external_headers["X-NewRelic-Synthetics-Info"] + ), "synthetics_info_header=%r, external_headers=%r" % (synthetics_info_header, external_headers) else: assert "X-NewRelic-Synthetics-Info" not in external_headers - return result return _validate_synthetics_external_trace_header -@transient_function_wrapper(httplib.__name__, 'HTTPConnection.putheader') +@transient_function_wrapper(httplib.__name__, "HTTPConnection.putheader") def cache_outgoing_headers(wrapped, instance, args, kwargs): def _bind_params(header, *values): return header, values @@ -140,7 +135,7 @@ def 
_bind_params(header, *values): return wrapped(*args, **kwargs) -@transient_function_wrapper(httplib.__name__, 'HTTPResponse.getheaders') +@transient_function_wrapper(httplib.__name__, "HTTPResponse.getheaders") def insert_incoming_headers(wrapped, instance, args, kwargs): transaction = current_transaction() @@ -151,4 +146,4 @@ def insert_incoming_headers(wrapped, instance, args, kwargs): headers.extend(create_incoming_headers(transaction)) - return headers \ No newline at end of file + return headers diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py index 297a1ff8a..d77d4bdc0 100644 --- a/tests/testing_support/fixtures.py +++ b/tests/testing_support/fixtures.py @@ -39,6 +39,7 @@ application_settings, register_application, ) +from newrelic.api.ml_model import set_llm_token_count_callback from newrelic.common.agent_http import DeveloperModeClient from newrelic.common.encoding_utils import json_encode, obfuscate from newrelic.common.object_names import callable_name @@ -894,6 +895,59 @@ def _validate_application_exception_message(wrapped, instance, args, kwargs): return _validate_application_exception_message +def _validate_custom_event(recorded_event, required_event): + assert len(recorded_event) == 2 # [intrinsic, user attributes] + + intrinsics = recorded_event[0] + + assert intrinsics["type"] == required_event[0]["type"] + + now = time.time() + assert isinstance(intrinsics["timestamp"], int) + assert intrinsics["timestamp"] <= 1000.0 * now + assert intrinsics["timestamp"] >= 1000.0 * required_event[0]["timestamp"] + + assert recorded_event[1].items() == required_event[1].items() + + +def validate_custom_event_in_application_stats_engine(required_event): + @function_wrapper + def _validate_custom_event_in_application_stats_engine(wrapped, instance, args, kwargs): + try: + result = wrapped(*args, **kwargs) + except: + raise + else: + stats = core_application_stats_engine(None) + assert stats.custom_events.num_samples == 1 + + custom_event = next(iter(stats.custom_events)) + _validate_custom_event(custom_event, required_event) + + return result + + return _validate_custom_event_in_application_stats_engine + + +def validate_custom_event_count(count): + @function_wrapper + def _validate_custom_event_count(wrapped, instance, args, kwargs): + try: + result = wrapped(*args, **kwargs) + except: + raise + else: + stats = core_application_stats_engine(None) + assert stats.custom_events.num_samples == count, "Expected %d, got %d" % ( + count, + stats.custom_events.num_samples, + ) + + return result + + return _validate_custom_event_count + + def _validate_node_parenting(node, expected_node): assert node.exclusive >= 0, "node.exclusive = %s" % node.exclusive @@ -1024,6 +1078,27 @@ def _override_application_settings(wrapped, instance, args, kwargs): return _override_application_settings +def override_llm_token_callback_settings(callback): + @function_wrapper + def _override_llm_token_callback_settings(wrapped, instance, args, kwargs): + # The settings object has references from a number of + # different places. We have to create a copy, overlay + # the temporary settings and then when done clear the + # top level settings object and rebuild it when done. 
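+        # A deep copy of the settings dict is taken below so that any nested
+        # settings objects mutated by set_llm_token_count_callback can be
+        # restored intact once the wrapped test finishes.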
+ original_settings = application_settings() + backup = copy.deepcopy(original_settings.__dict__) + + try: + set_llm_token_count_callback(callback, application_instance()) + + return wrapped(*args, **kwargs) + finally: + original_settings.__dict__.clear() + original_settings.__dict__.update(backup) + + return _override_llm_token_callback_settings + + def override_generic_settings(settings_object, overrides): @function_wrapper def _override_generic_settings(wrapped, instance, args, kwargs): diff --git a/tests/testing_support/ml_testing_utils.py b/tests/testing_support/ml_testing_utils.py new file mode 100644 index 000000000..a9a74af17 --- /dev/null +++ b/tests/testing_support/ml_testing_utils.py @@ -0,0 +1,65 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy + +import pytest +from testing_support.fixtures import override_application_settings + +from newrelic.api.transaction import current_transaction + +disabled_ai_monitoring_settings = override_application_settings({"ai_monitoring.enabled": False}) +disabled_ai_monitoring_streaming_settings = override_application_settings({"ai_monitoring.streaming.enabled": False}) +disabled_ai_monitoring_record_content_settings = override_application_settings( + {"ai_monitoring.record_content.enabled": False} +) + + +def llm_token_count_callback(model, content): + return 105 + + +def add_token_count_to_events(expected_events): + events = copy.deepcopy(expected_events) + for event in events: + if event[0]["type"] != "LlmChatCompletionSummary": + event[1]["token_count"] = 105 + return events + + +def events_sans_content(event): + new_event = copy.deepcopy(event) + for _event in new_event: + if "input" in _event[1]: + del _event[1]["input"] + elif "content" in _event[1]: + del _event[1]["content"] + return new_event + + +def events_sans_llm_metadata(expected_events): + events = copy.deepcopy(expected_events) + for event in events: + del event[1]["llm.conversation_id"], event[1]["llm.foo"] + return events + + +@pytest.fixture(scope="session") +def set_trace_info(): + def _set_trace_info(): + txn = current_transaction() + if txn: + txn.guid = "transaction-id" + txn._trace_id = "trace-id" + + return _set_trace_info diff --git a/tests/testing_support/validators/validate_custom_events.py b/tests/testing_support/validators/validate_custom_events.py new file mode 100644 index 000000000..206ce08f1 --- /dev/null +++ b/tests/testing_support/validators/validate_custom_events.py @@ -0,0 +1,109 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import time
+
+from testing_support.fixtures import catch_background_exceptions
+
+from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper
+from newrelic.packages import six
+
+
+def validate_custom_events(events):
+    @function_wrapper
+    def _validate_wrapper(wrapped, instance, args, kwargs):
+        record_called = []
+        recorded_events = []
+
+        @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction")
+        @catch_background_exceptions
+        def _validate_custom_events(wrapped, instance, args, kwargs):
+            record_called.append(True)
+            try:
+                result = wrapped(*args, **kwargs)
+            except:
+                raise
+            recorded_events[:] = []
+            recorded_events.extend(list(instance._custom_events))
+
+            return result
+
+        _new_wrapper = _validate_custom_events(wrapped)
+        val = _new_wrapper(*args, **kwargs)
+        assert record_called
+        found_events = copy.copy(recorded_events)
+
+        record_called[:] = []
+        recorded_events[:] = []
+
+        for expected in events:
+            matching_custom_events = 0
+            mismatches = []
+            for captured in found_events:
+                if _check_event_attributes(expected, captured, mismatches):
+                    matching_custom_events += 1
+            assert matching_custom_events == 1, _event_details(matching_custom_events, found_events, mismatches)

+        return val
+
+    return _validate_wrapper
+
+
+def _check_event_attributes(expected, captured, mismatches):
+    assert len(captured) == 2  # [intrinsic, user attributes]
+
+    intrinsics = captured[0]
+
+    if intrinsics["type"] != expected[0]["type"]:
+        mismatches.append("key: type, value:<%s><%s>" % (expected[0]["type"], captured[0].get("type", None)))
+        return False
+
+    now = time.time()
+
+    if not (isinstance(intrinsics["timestamp"], int) and intrinsics["timestamp"] <= 1000.0 * now):
+        mismatches.append("key: timestamp, value:<%s>" % intrinsics["timestamp"])
+        return False
+
+    captured_keys = set(six.iterkeys(captured[1]))
+    expected_keys = set(six.iterkeys(expected[1]))
+    extra_keys = captured_keys - expected_keys
+
+    if extra_keys:
+        mismatches.append("extra_keys: %s" % str(tuple(extra_keys)))
+        return False
+
+    for key, value in six.iteritems(expected[1]):
+        if key in captured[1]:
+            captured_value = captured[1].get(key, None)
+        else:
+            mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured[1].get(key, None)))
+            return False
+
+        if value is not None:
+            if value != captured_value:
+                mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured_value))
+                return False
+
+    return True
+
+
+def _event_details(matching_custom_events, captured, mismatches):
+    details = [
+        "matching_custom_events=%d" % matching_custom_events,
+        "mismatches=%s" % mismatches,
+        "captured_events=%s" % captured,
+    ]
+
+    return "\n".join(details)
diff --git a/tests/testing_support/validators/validate_ml_event_payload.py b/tests/testing_support/validators/validate_ml_event_payload.py
index 4d43cbb22..9933b85f6 100644
--- a/tests/testing_support/validators/validate_ml_event_payload.py
+++ b/tests/testing_support/validators/validate_ml_event_payload.py
@@ -41,23 +41,36 @@ def payload_to_ml_events(payload):
     else:
         message = payload

-    resource_logs = message.get("resource_logs")
-    assert len(resource_logs) == 1
-    resource_logs = resource_logs[0]
-    resource = resource_logs.get("resource")
-    assert resource and resource.get("attributes")[0] == {
-        "key": "instrumentation.provider",
-        "value": {"string_value": "newrelic-opentelemetry-python-ml"},
-    }
-
-    scope_logs = resource_logs.get("scope_logs")
-    assert len(scope_logs) == 1
-    scope_logs = scope_logs[0]
-
-    scope = scope_logs.get("scope")
-    assert scope is None
-    logs = scope_logs.get("log_records")
-
-    return logs
+    inference_logs = []
+    apm_logs = []
+    resource_log_records = message.get("resource_logs")
+    for resource_logs in resource_log_records:
+        resource = resource_logs.get("resource")
+        assert resource
+        resource_attrs = resource.get("attributes")
+        assert {
+            "key": "instrumentation.provider",
+            "value": {"string_value": "newrelic-opentelemetry-python-ml"},
+        } in resource_attrs
+        scope_logs = resource_logs.get("scope_logs")
+        assert len(scope_logs) == 1
+        scope_logs = scope_logs[0]
+
+        scope = scope_logs.get("scope")
+        assert scope is None
+        logs = scope_logs.get("log_records")
+        event_name = get_event_name(logs)
+        if event_name == "InferenceEvent":
+            inference_logs = logs
+        else:
+            # Make sure apm entity attrs are present on the resource.
+            expected_apm_keys = ("entity.type", "entity.name", "entity.guid", "hostname", "instrumentation.provider")
+            assert all(attr["key"] in expected_apm_keys for attr in resource_attrs)
+            assert all(attr["value"] not in ("", None) for attr in resource_attrs)
+
+            apm_logs = logs
+
+    return inference_logs, apm_logs


 def validate_ml_event_payload(ml_events=None):
@@ -86,19 +99,34 @@ def _bind_params(method, payload=(), *args, **kwargs):
             assert recorded_ml_events

             decoded_payloads = [payload_to_ml_events(payload) for payload in recorded_ml_events]
-            all_logs = []
-            for sent_logs in decoded_payloads:
-                for data_point in sent_logs:
-                    for key in ("time_unix_nano",):
-                        assert key in data_point, "Invalid log format. Missing key: %s" % key
+            decoded_inference_payloads = [payload[0] for payload in decoded_payloads]
+            decoded_apm_payloads = [payload[1] for payload in decoded_payloads]
+            all_apm_logs = normalize_logs(decoded_apm_payloads)
+            all_inference_logs = normalize_logs(decoded_inference_payloads)
+
+            for expected_event in ml_events.get("inference", []):
+                assert expected_event in all_inference_logs, "%s Not Found. Got: %s" % (expected_event, all_inference_logs)
+            for expected_event in ml_events.get("apm", []):
+                assert expected_event in all_apm_logs, "%s Not Found. Got: %s" % (expected_event, all_apm_logs)
+            return val
+
+    return _validate_wrapper
+
+
+def normalize_logs(decoded_payloads):
+    all_logs = []
+    for sent_logs in decoded_payloads:
+        for data_point in sent_logs:
+            for key in ("time_unix_nano",):
+                assert key in data_point, "Invalid log format. Missing key: %s" % key
             all_logs.append(
                 {attr["key"]: attribute_to_value(attr["value"]) for attr in (data_point.get("attributes") or [])}
             )
+    return all_logs

-            for expected_event in ml_events:
-                assert expected_event in all_logs, "%s Not Found. Got: %s" % (expected_event, all_logs)
-            return val
-
-    return _validate_wrapper
+def get_event_name(logs):
+    for attr in logs[0]["attributes"]:
+        if attr["key"] == "event.name":
+            return attr["value"]["string_value"]
diff --git a/tests/testing_support/validators/validate_synthetics_event.py b/tests/testing_support/validators/validate_synthetics_event.py
index bab176138..478582f9b 100644
--- a/tests/testing_support/validators/validate_synthetics_event.py
+++ b/tests/testing_support/validators/validate_synthetics_event.py
@@ -18,6 +18,7 @@
     transient_function_wrapper,
 )

+
 def validate_synthetics_event(required_attrs=None, forgone_attrs=None, should_exist=True):
     required_attrs = required_attrs or []
     forgone_attrs = forgone_attrs or []
@@ -68,4 +69,3 @@ def wrapper(wrapped, instance, args, kwargs):
         return result

     return wrapper
-
diff --git a/tox.ini b/tox.ini
index cd3bb4549..1c4bdf8f6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -102,9 +102,10 @@ envlist =
     python-cross_agent-{py27,py37,py38,py39,py310,py311,py312}-{with,without}_extensions,
     python-cross_agent-pypy27-without_extensions,
     python-datastore_sqlite-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310},
-    python-external_botocore-py311-botocore128,
     python-external_botocore-{py38,py39,py310,py311,py312}-botocorelatest,
+    python-external_botocore-{py311}-botocorelatest-langchain,
     python-external_botocore-py310-botocore0125,
+    python-external_botocore-py311-botocore128,
     python-external_feedparser-py27-feedparser{05,06},
     python-external_http-{py27,py37,py38,py39,py310,py311,py312,pypy27},
     python-external_httplib-{py27,py37,py38,py39,py310,py311,py312,pypy27,pypy310},
@@ -145,6 +146,11 @@ envlist =
    python-logger_loguru-{py37,py38,py39,py310,py311,py312,pypy310}-logurulatest,
     python-logger_loguru-py39-loguru{06,05},
     python-logger_structlog-{py37,py38,py39,py310,py311,py312,pypy310}-structloglatest,
+    python-mlmodel_openai-openai0-{py37,py38,py39,py310,py311,py312},
+    python-mlmodel_openai-openai107-py312,
+    python-mlmodel_openai-openailatest-{py37,py38,py39,py310,py311,py312},
+    ; langchain dependency faiss-cpu isn't compatible with 3.12 yet.
+    python-mlmodel_langchain-{py38,py39,py310,py311},
     python-mlmodel_sklearn-{py37}-scikitlearn0101,
     python-mlmodel_sklearn-{py38,py39,py310,py311,py312}-scikitlearnlatest,
     python-template_genshi-{py27,py37,py38,py39,py310,py311,py312}-genshilatest,
@@ -209,8 +215,8 @@ deps =
     component_flask_rest: flask-restful
     component_flask_rest: jinja2
     component_flask_rest: itsdangerous
-    component_flask_rest-flaskrestxlatest: flask-restx
     component_flask_rest-flaskrestxlatest: flask
+    component_flask_rest-flaskrestxlatest: flask-restx
     ; flask-restx only supports Flask v3 after flask-restx v1.3.0
     component_flask_rest-flaskrestx110: Flask<3.0
     component_flask_rest-flaskrestx110: flask-restx<1.2
@@ -254,6 +260,7 @@ deps =
     datastore_redis-redis0400: redis<4.1
     external_botocore-botocorelatest: botocore
     external_botocore-botocorelatest: boto3
+    external_botocore-botocorelatest-langchain: langchain
     external_botocore-botocore128: botocore<1.29
     external_botocore-botocore0125: botocore<1.26
     external_botocore-{py38,py39,py310,py311,py312}: moto
@@ -323,7 +330,7 @@ deps =
     framework_sanic-sanic{200904,210300,2109,2112,2203,2290}: websockets<11
     ; For test_exception_in_middleware test, anyio is used:
     ; https://github.com/encode/starlette/pull/1157
-    ; but anyiolatest creates breaking changes to our tests 
+    ; but anyiolatest creates breaking changes to our tests
     ; (but not the instrumentation):
     ; https://github.com/agronholm/anyio/releases/tag/4.0.0
     framework_starlette: anyio<4
@@ -338,6 +345,21 @@ deps =
     framework_tornado: pycurl
     framework_tornado-tornadolatest: tornado
     framework_tornado-tornadomaster: https://github.com/tornadoweb/tornado/archive/master.zip
+    mlmodel_openai-openai0: openai[datalib]<1.0
+    mlmodel_openai-openai107: openai[datalib]<1.8
+    mlmodel_openai-openailatest: openai[datalib]
+    ; Required for openai testing
+    mlmodel_openai: protobuf
+    mlmodel_langchain: langchain
+    mlmodel_langchain: langchain-community
+    mlmodel_langchain: openai[datalib]
+    ; Required for langchain testing
+    mlmodel_langchain: pypdf
+    mlmodel_langchain: tiktoken
+    mlmodel_langchain: faiss-cpu
+    mlmodel_langchain: mock
+    mlmodel_langchain: asyncio
+    mlmodel_langchain: langchain-openai
     logger_loguru-logurulatest: loguru
     logger_loguru-loguru06: loguru<0.7
     logger_loguru-loguru05: loguru<0.6
@@ -456,6 +478,8 @@ changedir =
     framework_starlette: tests/framework_starlette
     framework_strawberry: tests/framework_strawberry
     framework_tornado: tests/framework_tornado
+    mlmodel_openai: tests/mlmodel_openai
+    mlmodel_langchain: tests/mlmodel_langchain
     logger_logging: tests/logger_logging
     logger_loguru: tests/logger_loguru
     logger_structlog: tests/logger_structlog
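
Reviewer note: a minimal sketch of how the new fixtures are meant to compose in a test. This is illustrative only and not part of the changeset; the test name, the zero-event expectation, and the use of background_task here are assumptions, not code from the diff.

# Hypothetical test module -- illustrates the fixtures added above.
from testing_support.fixtures import (
    override_llm_token_callback_settings,
    validate_custom_event_count,
)
from testing_support.ml_testing_utils import (  # noqa: F401; set_trace_info is a pytest fixture
    llm_token_count_callback,
    set_trace_info,
)

from newrelic.api.background_task import background_task


# Validators wrap the test from the outside; background_task sits closest to
# the function so a transaction is active while the test body runs.
@override_llm_token_callback_settings(llm_token_count_callback)
@validate_custom_event_count(count=0)
@background_task(name="test_fixture_composition")
def test_token_callback_installed_and_restored(set_trace_info):
    # Stamp well-known guid/trace-id values on the current transaction so
    # expected events could reference them deterministically.
    set_trace_info()
    # No LLM calls are made here, so validate_custom_event_count asserts that
    # zero custom events were recorded; the token-count callback is discarded
    # again when override_llm_token_callback_settings restores the settings.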