Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merge main into develop-ai-limited-preview #1072

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/containers/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ RUN echo 'eval "$(pyenv init -)"' >>${HOME}/.bashrc && \
pyenv update

# Install Python
ARG PYTHON_VERSIONS="3.11 3.10 3.9 3.8 3.7 3.12 2.7 pypy2.7-7.3.12 pypy3.8-7.3.11"
ARG PYTHON_VERSIONS="3.11 3.10 3.9 3.8 3.7 3.12 2.7 pypy2.7-7.3.12 pypy3.10-7.3.15"
COPY --chown=1000:1000 --chmod=+x ./install-python.sh /tmp/install-python.sh
RUN /tmp/install-python.sh && \
rm /tmp/install-python.sh
Expand Down
15 changes: 14 additions & 1 deletion newrelic/api/transaction.py
Original file line number Diff line number Diff line change
Expand Up @@ -868,6 +868,11 @@ def trace_intrinsics(self):
if self._loop_time:
i_attrs["eventLoopTime"] = self._loop_time

# `guid` is added here to make it an intrinsic
# that is agnostic to distributed tracing.
if self.guid:
i_attrs["guid"] = self.guid

# Add in special CPU time value for UI to display CPU burn.

# TODO: Disable cpu time value for CPU burn as was
Expand All @@ -886,10 +891,18 @@ def trace_intrinsics(self):
def distributed_trace_intrinsics(self):
i_attrs = {}

# `guid` is assigned unconditionally because it is now an intrinsic
# attribute regardless of whether distributed tracing is enabled.
# Normally trace_intrinsics calls this function, so the guid would be
# picked up there; but some callers (e.g. slow SQL handling in
# database_node) invoke this function directly, so it must also be set
# here. (Historically the guid was treated purely as a distributed
# tracing intrinsic and only emitted when DT was enabled.)
i_attrs["guid"] = self.guid

if not self._settings.distributed_tracing.enabled:
return i_attrs

i_attrs["guid"] = self.guid
i_attrs["sampled"] = self.sampled
i_attrs["priority"] = self.priority
i_attrs["traceId"] = self.trace_id
Expand Down
199 changes: 116 additions & 83 deletions newrelic/core/database_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,22 +16,35 @@

import newrelic.core.attribute as attribute
import newrelic.core.trace_node

from newrelic.common import system_info
from newrelic.core.database_utils import sql_statement, explain_plan
from newrelic.core.node_mixin import DatastoreNodeMixin
from newrelic.core.database_utils import explain_plan, sql_statement
from newrelic.core.metric import TimeMetric
from newrelic.core.node_mixin import DatastoreNodeMixin


_SlowSqlNode = namedtuple('_SlowSqlNode',
['duration', 'path', 'request_uri', 'sql', 'sql_format',
'metric', 'dbapi2_module', 'stack_trace', 'connect_params',
'cursor_params', 'sql_parameters', 'execute_params',
'host', 'port_path_or_id', 'database_name', 'params'])
_SlowSqlNode = namedtuple(
"_SlowSqlNode",
[
"duration",
"path",
"request_uri",
"sql",
"sql_format",
"metric",
"dbapi2_module",
"stack_trace",
"connect_params",
"cursor_params",
"sql_parameters",
"execute_params",
"host",
"port_path_or_id",
"database_name",
"params",
],
)


class SlowSqlNode(_SlowSqlNode):

def __new__(cls, *args, **kwargs):
node = _SlowSqlNode.__new__(cls, *args, **kwargs)
node.statement = sql_statement(node.sql, node.dbapi2_module)
Expand All @@ -46,16 +59,33 @@ def identifier(self):
return self.statement.identifier


_DatabaseNode = namedtuple('_DatabaseNode',
['dbapi2_module', 'sql', 'children', 'start_time', 'end_time',
'duration', 'exclusive', 'stack_trace', 'sql_format',
'connect_params', 'cursor_params', 'sql_parameters',
'execute_params', 'host', 'port_path_or_id', 'database_name',
'guid', 'agent_attributes', 'user_attributes'])
_DatabaseNode = namedtuple(
"_DatabaseNode",
[
"dbapi2_module",
"sql",
"children",
"start_time",
"end_time",
"duration",
"exclusive",
"stack_trace",
"sql_format",
"connect_params",
"cursor_params",
"sql_parameters",
"execute_params",
"host",
"port_path_or_id",
"database_name",
"guid",
"agent_attributes",
"user_attributes",
],
)


class DatabaseNode(_DatabaseNode, DatastoreNodeMixin):

def __new__(cls, *args, **kwargs):
node = _DatabaseNode.__new__(cls, *args, **kwargs)
node.statement = sql_statement(node.sql, node.dbapi2_module)
Expand Down Expand Up @@ -86,9 +116,15 @@ def formatted(self):
return self.statement.formatted(self.sql_format)

def explain_plan(self, connections):
return explain_plan(connections, self.statement, self.connect_params,
self.cursor_params, self.sql_parameters, self.execute_params,
self.sql_format)
return explain_plan(
connections,
self.statement,
self.connect_params,
self.cursor_params,
self.sql_parameters,
self.execute_params,
self.sql_format,
)

def time_metrics(self, stats, root, parent):
"""Return a generator yielding the timed metrics for this
Expand All @@ -97,80 +133,74 @@ def time_metrics(self, stats, root, parent):
"""

product = self.product
operation = self.operation or 'other'
operation = self.operation or "other"
target = self.target

# Determine the scoped metric

statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,
target, operation)
statement_metric_name = "Datastore/statement/%s/%s/%s" % (product, target, operation)

operation_metric_name = 'Datastore/operation/%s/%s' % (product,
operation)
operation_metric_name = "Datastore/operation/%s/%s" % (product, operation)

if target:
scoped_metric_name = statement_metric_name
else:
scoped_metric_name = operation_metric_name

yield TimeMetric(name=scoped_metric_name, scope=root.path,
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name=scoped_metric_name, scope=root.path, duration=self.duration, exclusive=self.exclusive)

# Unscoped rollup metrics

yield TimeMetric(name='Datastore/all', scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name="Datastore/all", scope="", duration=self.duration, exclusive=self.exclusive)

yield TimeMetric(name='Datastore/%s/all' % product, scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name="Datastore/%s/all" % product, scope="", duration=self.duration, exclusive=self.exclusive)

if root.type == 'WebTransaction':
yield TimeMetric(name='Datastore/allWeb', scope='',
duration=self.duration, exclusive=self.exclusive)
if root.type == "WebTransaction":
yield TimeMetric(name="Datastore/allWeb", scope="", duration=self.duration, exclusive=self.exclusive)

yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(
name="Datastore/%s/allWeb" % product, scope="", duration=self.duration, exclusive=self.exclusive
)
else:
yield TimeMetric(name='Datastore/allOther', scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name="Datastore/allOther", scope="", duration=self.duration, exclusive=self.exclusive)

yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(
name="Datastore/%s/allOther" % product, scope="", duration=self.duration, exclusive=self.exclusive
)

# Unscoped operation metric

yield TimeMetric(name=operation_metric_name, scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name=operation_metric_name, scope="", duration=self.duration, exclusive=self.exclusive)

# Unscoped statement metric

if target:
yield TimeMetric(name=statement_metric_name, scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name=statement_metric_name, scope="", duration=self.duration, exclusive=self.exclusive)

# Unscoped instance Metric

if self.instance_hostname and self.port_path_or_id:

instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,
self.instance_hostname, self.port_path_or_id)
instance_metric_name = "Datastore/instance/%s/%s/%s" % (
product,
self.instance_hostname,
self.port_path_or_id,
)

yield TimeMetric(name=instance_metric_name, scope='',
duration=self.duration, exclusive=self.exclusive)
yield TimeMetric(name=instance_metric_name, scope="", duration=self.duration, exclusive=self.exclusive)

def slow_sql_node(self, stats, root):
product = self.product
operation = self.operation or 'other'
operation = self.operation or "other"
target = self.target

if target:
name = 'Datastore/statement/%s/%s/%s' % (product, target,
operation)
name = "Datastore/statement/%s/%s/%s" % (product, target, operation)
else:
name = 'Datastore/operation/%s/%s' % (product, operation)
name = "Datastore/operation/%s/%s" % (product, operation)

request_uri = ''
if root.type == 'WebTransaction':
request_uri = ""
if root.type == "WebTransaction":
request_uri = root.request_uri

params = None
Expand All @@ -182,19 +212,24 @@ def slow_sql_node(self, stats, root):
# explain plan. Only limit the length when sending the
# formatted SQL up to the data collector.

return SlowSqlNode(duration=self.duration, path=root.path,
request_uri=request_uri, sql=self.sql,
sql_format=self.sql_format, metric=name,
dbapi2_module=self.dbapi2_module,
stack_trace=self.stack_trace,
connect_params=self.connect_params,
cursor_params=self.cursor_params,
sql_parameters=self.sql_parameters,
execute_params=self.execute_params,
host=self.instance_hostname,
port_path_or_id=self.port_path_or_id,
database_name=self.database_name,
params=params)
return SlowSqlNode(
duration=self.duration,
path=root.path,
request_uri=request_uri,
sql=self.sql,
sql_format=self.sql_format,
metric=name,
dbapi2_module=self.dbapi2_module,
stack_trace=self.stack_trace,
connect_params=self.connect_params,
cursor_params=self.cursor_params,
sql_parameters=self.sql_parameters,
execute_params=self.execute_params,
host=self.instance_hostname,
port_path_or_id=self.port_path_or_id,
database_name=self.database_name,
params=params,
)

def trace_node(self, stats, root, connections):
name = root.string_table.cache(self.name)
Expand All @@ -209,56 +244,54 @@ def trace_node(self, stats, root, connections):
sql = self.formatted

# Agent attributes
self.agent_attributes['db.instance'] = self.db_instance
self.agent_attributes["db.instance"] = self.db_instance
if sql:
# Limit the length of any SQL that is reported back.

limit = root.settings.agent_limits.sql_query_length_maximum

self.agent_attributes['db.statement'] = sql[:limit]
self.agent_attributes["db.statement"] = sql[:limit]

params = self.get_trace_segment_params(root.settings)

# Only send datastore instance params if not empty.

if self.host:
params['host'] = self.instance_hostname
params["host"] = self.instance_hostname

if self.port_path_or_id:
params['port_path_or_id'] = self.port_path_or_id
params["port_path_or_id"] = self.port_path_or_id

sql = params.get('db.statement')
sql = params.get("db.statement")
if sql:
params['db.statement'] = root.string_table.cache(sql)
params["db.statement"] = root.string_table.cache(sql)

if self.stack_trace:
params['backtrace'] = [root.string_table.cache(x) for x in
self.stack_trace]
params["backtrace"] = [root.string_table.cache(x) for x in self.stack_trace]

# Only perform an explain plan if this node ended up being
# flagged to have an explain plan. This is applied when cap
# on number of explain plans for whole harvest period is
# applied across all transaction traces just prior to the
# transaction traces being generated.

if getattr(self, 'generate_explain_plan', None):
if getattr(self, "generate_explain_plan", None):
explain_plan_data = self.explain_plan(connections)
if explain_plan_data:
params['explain_plan'] = explain_plan_data
params["explain_plan"] = explain_plan_data

return newrelic.core.trace_node.TraceNode(start_time=start_time,
end_time=end_time, name=name, params=params, children=children,
label=None)
return newrelic.core.trace_node.TraceNode(
start_time=start_time, end_time=end_time, name=name, params=params, children=children, label=None
)

def span_event(self, *args, **kwargs):
sql = self.formatted

if sql:
# Truncate to 2000 bytes and append ...
_, sql = attribute.process_user_attribute(
'db.statement', sql, max_length=2000, ending='...')
_, sql = attribute.process_user_attribute("db.statement", sql, max_length=2000, ending="...")

self.agent_attributes['db.statement'] = sql
self.agent_attributes["db.statement"] = sql

if self.target:
self.agent_attributes["db.collection"] = self.target
Expand Down
3 changes: 1 addition & 2 deletions newrelic/core/error_collector.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,4 @@

from collections import namedtuple

TracedError = namedtuple('TracedError',
['start_time','path','message','type','parameters'])
TracedError = namedtuple("TracedError", ["start_time", "path", "message", "type", "parameters"])
6 changes: 5 additions & 1 deletion newrelic/core/stats_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -894,7 +894,11 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None,
attributes["agentAttributes"][attr.name] = attr.value

error_details = TracedError(
start_time=time.time(), path="Exception", message=message, type=fullname, parameters=attributes
start_time=time.time(),
path="Exception",
message=message,
type=fullname,
parameters=attributes,
)

# Save this error as a trace and an event.
Expand Down
Loading
Loading