Skip to content

Commit 6173521

Browse files
Tom McCarthy and heitorlessa authored
feat: improve error handling for log_metrics decorator (#71)
* feat: dont throw exception by default from log_metrics if no metrics are emitted * docs: update details for change to error handling * chore: rename parameter for clarity * fix: correct bug in exception handling from previous commits add raise_on_empty_metrics param to partial func call revert mistake in change to test for exception propagation * improv: change log debug statement to warning when no metrics are present * docs: add note on suppressing warning for empty metric set * improv: add warning for manual serialization Signed-off-by: heitorlessa <[email protected]> * improv: whitespace and warning supress as info Signed-off-by: heitorlessa <[email protected]> Co-authored-by: heitorlessa <[email protected]>
1 parent 644240b commit 6173521

File tree

3 files changed

+66
-12
lines changed

3 files changed

+66
-12
lines changed

Diff for: aws_lambda_powertools/metrics/metrics.py

+23-6
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
import json
33
import logging
44
import os
5+
import warnings
56
from typing import Any, Callable
67

78
from .base import MetricManager
@@ -82,7 +83,12 @@ def clear_metrics(self):
8283
self.metric_set.clear()
8384
self.dimension_set.clear()
8485

85-
def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False):
86+
def log_metrics(
87+
self,
88+
lambda_handler: Callable[[Any, Any], Any] = None,
89+
capture_cold_start_metric: bool = False,
90+
raise_on_empty_metrics: bool = False,
91+
):
8692
"""Decorator to serialize and publish metrics at the end of a function execution.
8793
8894
Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler).
@@ -102,6 +108,10 @@ def handler(event, context)
102108
----------
103109
lambda_handler : Callable[[Any, Any], Any], optional
104110
Lambda function handler, by default None
111+
capture_cold_start_metric : bool, optional
112+
Captures cold start metric, by default False
113+
raise_on_empty_metrics : bool, optional
114+
Raise exception if no metrics are emitted, by default False
105115
106116
Raises
107117
------
@@ -113,7 +123,11 @@ def handler(event, context)
113123
# Return a partial function with args filled
114124
if lambda_handler is None:
115125
logger.debug("Decorator called with parameters")
116-
return functools.partial(self.log_metrics, capture_cold_start_metric=capture_cold_start_metric)
126+
return functools.partial(
127+
self.log_metrics,
128+
capture_cold_start_metric=capture_cold_start_metric,
129+
raise_on_empty_metrics=raise_on_empty_metrics,
130+
)
117131

118132
@functools.wraps(lambda_handler)
119133
def decorate(event, context):
@@ -122,10 +136,13 @@ def decorate(event, context):
122136
if capture_cold_start_metric:
123137
self.__add_cold_start_metric(context=context)
124138
finally:
125-
metrics = self.serialize_metric_set()
126-
self.clear_metrics()
127-
logger.debug("Publishing metrics", {"metrics": metrics})
128-
print(json.dumps(metrics))
139+
if not raise_on_empty_metrics and not self.metric_set:
140+
warnings.warn("No metrics to publish, skipping")
141+
else:
142+
metrics = self.serialize_metric_set()
143+
self.clear_metrics()
144+
logger.debug("Publishing metrics", {"metrics": metrics})
145+
print(json.dumps(metrics))
129146

130147
return response
131148

Diff for: docs/content/core/metrics.mdx

+23-3
Original file line numberDiff line numberDiff line change
@@ -107,15 +107,31 @@ def lambda_handler(evt, ctx):
107107
...
108108
```
109109

110-
`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if any of the following criteria is met, `SchemaValidationError` exception will be raised:
110+
`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised.
111111

112-
* At least of one Metric and Dimension
112+
If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:
113+
114+
* Minimum of 1 dimension
113115
* Maximum of 9 dimensions
114116
* Namespace is set, and no more than one
115117
* Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
116118

119+
If you want to ensure that at least one metric is emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator:
120+
121+
```python:title=lambda_handler.py
122+
from aws_lambda_powertools.metrics import Metrics
123+
124+
@metrics.log_metrics(raise_on_empty_metrics=True) # highlight-line
125+
def lambda_handler(evt, ctx):
126+
...
127+
```
128+
129+
<Note type="info">
130+
If you expect your function to execute without publishing metrics every time, you can suppress the warning with <strong><code>warnings.filterwarnings("ignore", "No metrics to publish*")</code></strong>.
131+
</Note><br/>
132+
117133
<Note type="warning">
118-
When nesting multiple middlwares, you should use <strong><code>log_metrics</code> as your last decorator wrapping all subsequent ones</strong>.
134+
When nesting multiple middlewares, you should use <strong><code>log_metrics</code> as your last decorator wrapping all subsequent ones</strong>.
119135
</Note><br/>
120136

121137
```python:title=lambda_handler_nested_middlewares.py
@@ -138,6 +154,10 @@ def lambda_handler(evt, ctx):
138154

139155
If you prefer not to use `log_metrics` because you might want to encapsulate additional logic when doing so, you can manually flush and clear metrics as follows:
140156

157+
<Note type="warning">
158+
Metrics, dimensions and namespace validation still applies.
159+
</Note><br/>
160+
141161
```python:title=manual_metric_serialization.py
142162
import json
143163
from aws_lambda_powertools import Metrics

Diff for: tests/functional/test_metrics.py

+20-3
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import json
2+
import warnings
23
from collections import namedtuple
34
from typing import Any, Dict, List
45

@@ -350,13 +351,13 @@ def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace):
350351
# GIVEN Metrics is initialized
351352
my_metrics = Metrics()
352353

353-
@my_metrics.log_metrics
354+
@my_metrics.log_metrics(raise_on_empty_metrics=True)
354355
def lambda_handler(evt, context):
355-
# WHEN log_metrics is used despite having no metrics
356+
# WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
356357
# and the function decorated also raised an exception
357358
raise ValueError("Bubble up")
358359

359-
# THEN we should first raise SchemaValidationError as the main exception
360+
# THEN the raised exception should be propagated
360361
with pytest.raises(SchemaValidationError):
361362
lambda_handler({}, {})
362363

@@ -632,3 +633,19 @@ def lambda_handler(evt, context):
632633
assert "ColdStart" not in output
633634

634635
assert "function_name" not in output
636+
637+
638+
def test_log_metrics_decorator_no_metrics(dimensions, namespace):
639+
# GIVEN Metrics is initialized
640+
my_metrics = Metrics(namespace=namespace["name"], service="test_service")
641+
642+
# WHEN using the log_metrics decorator and no metrics have been added
643+
@my_metrics.log_metrics
644+
def lambda_handler(evt, context):
645+
pass
646+
647+
# THEN it should raise a warning instead of throwing an exception
648+
with warnings.catch_warnings(record=True) as w:
649+
lambda_handler({}, {})
650+
assert len(w) == 1
651+
assert str(w[-1].message) == "No metrics to publish, skipping"

0 commit comments

Comments
 (0)