Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/PULL_REQUEST_TEMPLATE.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,4 @@
- [ ] This PR's changes are covered by the automated tests
- [ ] This PR collects user input/sensitive content into Datadog
- [ ] This PR passes the integration tests (ask a Datadog member to run the tests)
- [ ] This PR passes the unit tests
3 changes: 3 additions & 0 deletions .github/workflows/lambdachecks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,6 @@ jobs:
- name: Run trace forwarder integration tests
run: |
./aws/logs_monitoring/trace_forwarder/scripts/run_tests.sh
- name: Run enhanced metric unittest
run: |
python -m unittest
41 changes: 28 additions & 13 deletions aws/logs_monitoring/enhanced_lambda_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
BILLED_DURATION_METRIC_NAME = "billed_duration"
MEMORY_ALLOCATED_FIELD_NAME = "memorysize"
MAX_MEMORY_USED_METRIC_NAME = "max_memory_used"
INIT_DURATION_METRIC_NAME = "init_duration"

# Create named groups for each metric and tag so that we can
# access the values from the search result by name
Expand All @@ -39,13 +40,9 @@
)
+ r"Memory\s+Size:\s+(?P<{}>\d+)\s+MB\s+".format(MEMORY_ALLOCATED_FIELD_NAME)
+ r"Max\s+Memory\s+Used:\s+(?P<{}>\d+)\s+MB".format(MAX_MEMORY_USED_METRIC_NAME)
+ r"(\s+Init\s+Duration:\s+(?P<{}>[\d\.]+)\s+ms)?".format(INIT_DURATION_METRIC_NAME)
)

# Pull memorysize tag and cold start from report
TAGS_TO_PARSE_FROM_REPORT = [
MEMORY_ALLOCATED_FIELD_NAME,
]

METRICS_TO_PARSE_FROM_REPORT = [
DURATION_METRIC_NAME,
BILLED_DURATION_METRIC_NAME,
Expand All @@ -56,6 +53,7 @@
METRIC_ADJUSTMENT_FACTORS = {
DURATION_METRIC_NAME: 0.001,
BILLED_DURATION_METRIC_NAME: 0.001,
INIT_DURATION_METRIC_NAME: 0.001,
}


Expand Down Expand Up @@ -439,17 +437,36 @@ def parse_metrics_from_report_log(report_log_line):
Returns:
metrics - DatadogMetricPoint[]
"""

regex_match = REPORT_LOG_REGEX.search(report_log_line)

if not regex_match:
return []

metrics = []
tags = []

# loop is to account for adding cold start
for tag in TAGS_TO_PARSE_FROM_REPORT:
tags.append(tag + ":" + regex_match.group(tag))
tags = ["memorysize:" + regex_match.group(MEMORY_ALLOCATED_FIELD_NAME)]
if regex_match.group(INIT_DURATION_METRIC_NAME):
tags.append("cold_start:true")
else:
tags.append("cold_start:false")

# if cold_start:
if regex_match.group(INIT_DURATION_METRIC_NAME):
metric_point_value = float(regex_match.group(INIT_DURATION_METRIC_NAME))
# Multiply by 1/1000 to convert ms to seconds
metric_point_value *= METRIC_ADJUSTMENT_FACTORS[INIT_DURATION_METRIC_NAME]

initial_duration = DatadogMetricPoint(
"{}.{}".format(
ENHANCED_METRICS_NAMESPACE_PREFIX, INIT_DURATION_METRIC_NAME
),
metric_point_value,
)

initial_duration.add_tags(tags)

metrics.append(initial_duration)
Comment on lines +454 to +469
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@jcstorms1 my apologies — after the weekend, I completely forgot another suggestion I made last week: merging lines 454-469 (after the change) into the for loop at line 471 (after the change), since their logic is 99% the same. I suggest adding INIT_DURATION_METRIC_NAME to METRICS_TO_PARSE_FROM_REPORT and adding a check within the for loop, like this:

for metric_name in METRICS_TO_PARSE_FROM_REPORT:
    if not regex_match.group(metric_name):
        continue
    metric_point_value = float(regex_match.group(metric_name))
    ....


for metric_name in METRICS_TO_PARSE_FROM_REPORT:
metric_point_value = float(regex_match.group(metric_name))
Expand All @@ -462,8 +479,7 @@ def parse_metrics_from_report_log(report_log_line):
metric_point_value,
)

if tags:
dd_metric.add_tags(tags)
dd_metric.add_tags(tags)

metrics.append(dd_metric)

Expand All @@ -475,8 +491,7 @@ def parse_metrics_from_report_log(report_log_line):
),
)

if tags:
estimated_cost_metric_point.add_tags(tags)
estimated_cost_metric_point.add_tags(tags)

metrics.append(estimated_cost_metric_point)

Expand Down
73 changes: 65 additions & 8 deletions aws/logs_monitoring/tests/test_enhanced_lambda_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,11 @@ class TestEnhancedLambdaMetrics(unittest.TestCase):
"Duration: 0.62 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 51 MB"
)

cold_start_report = (
"REPORT RequestId: 8edab1f8-7d34-4a8e-a965-15ccbbb78d4c "
"Duration: 0.81 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 90 MB Init Duration: 1234 ms"
)

report_with_xray = (
"REPORT RequestId: 814ba7cb-071e-4181-9a09-fa41db5bccad\tDuration: 1711.87 ms\t"
"Billed Duration: 1800 ms\tMemory Size: 128 MB\tMax Memory Used: 98 MB\t\n"
Expand Down Expand Up @@ -114,56 +119,92 @@ def test_parse_metrics_from_report_log(self):
[
{
"name": "aws.lambda.enhanced.duration",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"value": 0.00062,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.billed_duration",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"value": 0.1000,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.max_memory_used",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"value": 51.0,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.estimated_cost",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"timestamp": None,
"value": 4.0833375e-07,
},
],
)

parsed_metrics = parse_metrics_from_report_log(self.cold_start_report)
self.assertEqual(
[metric.__dict__ for metric in parsed_metrics],
[
{
"name": "aws.lambda.enhanced.init_duration",
"tags": ["memorysize:128", "cold_start:true",],
"value": 1.234,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.duration",
"tags": ["memorysize:128", "cold_start:true",],
"value": 0.0008100000000000001,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.billed_duration",
"tags": ["memorysize:128", "cold_start:true",],
"value": 0.1000,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.max_memory_used",
"tags": ["memorysize:128", "cold_start:true",],
"value": 90.0,
"timestamp": None,
},
{
"name": "aws.lambda.enhanced.estimated_cost",
"tags": ["memorysize:128", "cold_start:true",],
"timestamp": None,
"value": 4.0833375e-07,
},
],
)
parsed_metrics = parse_metrics_from_report_log(self.report_with_xray)
self.assertListEqual(
[metric.__dict__ for metric in parsed_metrics],
[
{
"name": "aws.lambda.enhanced.duration",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"timestamp": None,
"value": 1.71187,
},
{
"name": "aws.lambda.enhanced.billed_duration",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"timestamp": None,
"value": 1.8,
},
{
"name": "aws.lambda.enhanced.max_memory_used",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"timestamp": None,
"value": 98.0,
},
{
"name": "aws.lambda.enhanced.estimated_cost",
"tags": [],
"tags": ["memorysize:128", "cold_start:false",],
"timestamp": None,
"value": 3.9500075e-06,
},
Expand Down Expand Up @@ -208,6 +249,8 @@ def test_generate_enhanced_lambda_metrics(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.duration",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand All @@ -219,6 +262,8 @@ def test_generate_enhanced_lambda_metrics(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.billed_duration",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand All @@ -230,6 +275,8 @@ def test_generate_enhanced_lambda_metrics(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.max_memory_used",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand All @@ -241,6 +288,8 @@ def test_generate_enhanced_lambda_metrics(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.estimated_cost",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand Down Expand Up @@ -292,6 +341,8 @@ def test_generate_enhanced_lambda_metrics_with_tags(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.duration",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand All @@ -307,6 +358,8 @@ def test_generate_enhanced_lambda_metrics_with_tags(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.billed_duration",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand All @@ -322,6 +375,8 @@ def test_generate_enhanced_lambda_metrics_with_tags(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.max_memory_used",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand All @@ -337,6 +392,8 @@ def test_generate_enhanced_lambda_metrics_with_tags(self, mock_build_cache):
{
"name": "aws.lambda.enhanced.estimated_cost",
"tags": [
"memorysize:128",
"cold_start:false",
"region:us-east-1",
"account_id:172597598159",
"aws_account:172597598159",
Expand Down