Skip to content
This repository was archived by the owner on Nov 16, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions azure_monitor/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@

- Remove request metrics from auto-collection
([#124](https://github.com/microsoft/opentelemetry-azure-monitor-python/pull/124))
- Implement standard metrics for http dependency telemetry
([#125](https://github.com/microsoft/opentelemetry-azure-monitor-python/pull/125))

## 0.5b.0
Released 2020-09-24
Expand Down
33 changes: 33 additions & 0 deletions azure_monitor/examples/metrics/standard.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=no-name-in-module
import time

import requests
from opentelemetry import metrics
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk.metrics import MeterProvider

from azure_monitor import AzureMonitorMetricsExporter

# Use the default (stateless) SDK implementation of the meter provider.
metrics.set_meter_provider(MeterProvider(stateful=False))

# Instrument the requests library so its HTTP calls produce telemetry.
RequestsInstrumentor().instrument()
meter = RequestsInstrumentor().meter
exporter = AzureMonitorMetricsExporter(
    connection_string="InstrumentationKey=<INSTRUMENTATION KEY HERE>"
)
# Export the standard metrics collected from the requests library to
# Azure Monitor every 5 seconds.
metrics.get_meter_provider().start_pipeline(meter, exporter, 5)

# Generate some traffic: 10 rounds of 10 GET requests, pausing 2 seconds
# between requests and 5 seconds between rounds.
for _round in range(10):
    for _request in range(10):
        requests.get("http://example.com")
        time.sleep(2)
    time.sleep(5)

input("Press any key to exit...")
55 changes: 54 additions & 1 deletion azure_monitor/src/azure_monitor/export/metrics/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@ class AzureMonitorMetricsExporter(BaseExporter, MetricsExporter):
options: :doc:`export.options` to allow configuration for the exporter
"""

def __init__(self, **options):
    """Create the exporter and register the standard-metrics processor.

    Args:
        options: :doc:`export.options` to allow configuration for the
            exporter (forwarded unchanged to ``BaseExporter``).
    """
    super().__init__(**options)
    # Rewrite "http.client.duration" metric envelopes into Azure Monitor
    # standard dependency metrics before they are transmitted.
    self.add_telemetry_processor(standard_metrics_processor)

def export(
self, metric_records: Sequence[MetricRecord]
) -> MetricsExportResult:
Expand Down Expand Up @@ -73,13 +77,19 @@ def _metric_to_envelope(
)
envelope.name = "Microsoft.ApplicationInsights.Metric"
value = 0
_min = None
_max = None
count = None
metric = metric_record.instrument
if isinstance(metric, ValueObserver):
# mmscl
value = metric_record.aggregator.checkpoint.last
elif isinstance(metric, ValueRecorder):
# mmsc
value = metric_record.aggregator.checkpoint.count
value = metric_record.aggregator.checkpoint.sum
_min = metric_record.aggregator.checkpoint.min
_max = metric_record.aggregator.checkpoint.max
count = metric_record.aggregator.checkpoint.count
else:
# sum or lv
value = metric_record.aggregator.checkpoint
Expand All @@ -90,6 +100,9 @@ def _metric_to_envelope(
ns=metric.description,
name=metric.name,
value=value,
min=_min,
max=_max,
count=count,
kind=protocol.DataPointType.MEASUREMENT.value,
)

Expand All @@ -99,3 +112,43 @@ def _metric_to_envelope(
data = protocol.MetricData(metrics=[data_point], properties=properties)
envelope.data = protocol.Data(base_data=data, base_type="MetricData")
return envelope


def standard_metrics_processor(envelope):
    """Telemetry processor converting HTTP client duration metrics into
    Azure Monitor standard dependency metrics.

    Rewrites the first data point of an "http.client.duration" metric
    envelope in place: renames the point, marks it as an aggregation,
    and replaces the envelope properties with the Application Insights
    standard-metric dimensions.
    """
    base = envelope.data.base_data
    if not base.metrics:
        return
    point = base.metrics[0]
    if point.name != "http.client.duration":
        return
    point.name = "Dependency duration"
    point.kind = protocol.DataPointType.AGGREGATION.value
    status_code = base.properties.get("http.status_code")
    # 2xx/3xx responses count as successful dependencies; anything
    # else (including a missing or non-numeric code) is a failure.
    success = "False"
    if status_code:
        try:
            if 200 <= int(status_code) < 400:
                success = "True"
        except ValueError:
            pass
    base.properties = {
        "_MS.MetricId": "dependencies/duration",
        "_MS.IsAutocollected": "True",
        "cloud/roleInstance": utils.azure_monitor_context.get(
            "ai.cloud.roleInstance"
        ),
        "cloud/roleName": utils.azure_monitor_context.get("ai.cloud.role"),
        "Dependency.Success": success,
        # TODO: Check other properties if url doesn't exist
        "dependency/target": base.properties.get("http.url"),
        "Dependency.Type": "HTTP",
        "dependency/resultCode": status_code,
        # Won't need this once Azure Monitor supports histograms
        # We can't actually get the individual buckets because the bucket
        # collection must happen on the SDK side
        "dependency/performanceBucket": "",
        # TODO: OT does not have this in semantic conventions for trace
        "operation/synthetic": "",
        # TODO: Add other std. metrics as implemented
    }
67 changes: 61 additions & 6 deletions azure_monitor/tests/metrics/test_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,19 @@
from opentelemetry.sdk.util import ns_to_iso_str

from azure_monitor.export import ExportResult
from azure_monitor.export.metrics import AzureMonitorMetricsExporter
from azure_monitor.export.metrics import (
AzureMonitorMetricsExporter,
standard_metrics_processor,
)
from azure_monitor.options import ExporterOptions
from azure_monitor.protocol import Data, DataPoint, Envelope, MetricData
from azure_monitor.protocol import (
Data,
DataPoint,
DataPointType,
Envelope,
MetricData,
)
from azure_monitor.utils import azure_monitor_context

TEST_FOLDER = os.path.abspath(".test")
STORAGE_PATH = os.path.join(TEST_FOLDER)
Expand Down Expand Up @@ -154,6 +164,49 @@ def test_export_exception(self, mte, transmit, logger_mock):
self.assertEqual(result, MetricsExportResult.FAILURE)
self.assertEqual(logger_mock.exception.called, True)

def test_standard_metrics_processor(self):
    """http.client.duration points are rewritten into Azure Monitor
    standard dependency metrics with the expected dimensions."""
    data_point = mock.Mock()
    data_point.name = "http.client.duration"
    base = mock.Mock()
    base.metrics = [data_point]
    base.properties = {
        "http.status_code": "200",
        "http.url": "http://example.com",
    }
    envelope = mock.Mock()
    envelope.data.base_data = base
    standard_metrics_processor(envelope)
    self.assertEqual(data_point.name, "Dependency duration")
    self.assertEqual(data_point.kind, DataPointType.AGGREGATION.value)
    expected_properties = {
        "_MS.MetricId": "dependencies/duration",
        "_MS.IsAutocollected": "True",
        "cloud/roleInstance": azure_monitor_context.get(
            "ai.cloud.roleInstance"
        ),
        "cloud/roleName": azure_monitor_context.get("ai.cloud.role"),
        "Dependency.Success": "True",
        "dependency/target": "http://example.com",
        "Dependency.Type": "HTTP",
        "dependency/resultCode": "200",
        "dependency/performanceBucket": "",
        "operation/synthetic": "",
    }
    for key, value in expected_properties.items():
        self.assertEqual(base.properties[key], value)
    # A 5xx status code is not a successful dependency call.
    base.properties["http.status_code"] = "500"
    data_point.name = "http.client.duration"
    standard_metrics_processor(envelope)
    self.assertEqual(base.properties["Dependency.Success"], "False")
    # A non-numeric status code is treated as failure, not an error.
    base.properties["http.status_code"] = "asd"
    data_point.name = "http.client.duration"
    standard_metrics_processor(envelope)
    self.assertEqual(base.properties["Dependency.Success"], "False")

def test_metric_to_envelope_none(self):
    # A None metric record maps to a None envelope instead of raising.
    exporter = self._exporter
    self.assertIsNone(exporter._metric_to_envelope(None))
Expand Down Expand Up @@ -200,7 +253,6 @@ def test_observer_to_envelope(self):
aggregator.update(123)
aggregator.take_checkpoint()
record = MetricRecord(self._test_obs, self._test_labels, aggregator)
print(record.labels)
exporter = self._exporter
envelope = exporter._metric_to_envelope(record)
self.assertIsInstance(envelope, Envelope)
Expand Down Expand Up @@ -259,7 +311,6 @@ def test_observer_to_envelope_value_none(self):
self.assertEqual(envelope.data.base_data.metrics[0].ns, "testdesc")
self.assertEqual(envelope.data.base_data.metrics[0].name, "testname")
self.assertEqual(envelope.data.base_data.metrics[0].value, 0)
print(envelope.data.base_data.properties)
self.assertEqual(
envelope.data.base_data.properties["environment"], "staging"
)
Expand All @@ -273,7 +324,8 @@ def test_observer_to_envelope_value_none(self):

def test_value_recorder_to_envelope(self):
aggregator = MinMaxSumCountAggregator()
aggregator.update(123)
aggregator.update(100)
aggregator.update(300)
aggregator.take_checkpoint()
record = MetricRecord(
self._test_value_recorder, self._test_labels, aggregator
Expand All @@ -298,7 +350,10 @@ def test_value_recorder_to_envelope(self):
self.assertIsInstance(envelope.data.base_data.metrics[0], DataPoint)
self.assertEqual(envelope.data.base_data.metrics[0].ns, "testdesc")
self.assertEqual(envelope.data.base_data.metrics[0].name, "testname")
self.assertEqual(envelope.data.base_data.metrics[0].value, 1)
self.assertEqual(envelope.data.base_data.metrics[0].value, 400)
self.assertEqual(envelope.data.base_data.metrics[0].min, 100)
self.assertEqual(envelope.data.base_data.metrics[0].max, 300)
self.assertEqual(envelope.data.base_data.metrics[0].count, 2)
self.assertEqual(
envelope.data.base_data.properties["environment"], "staging"
)
Expand Down