feat: improve error handling for log_metrics decorator #71


Merged
8 commits merged on Jun 9, 2020
29 changes: 23 additions & 6 deletions aws_lambda_powertools/metrics/metrics.py
@@ -2,6 +2,7 @@
import json
import logging
import os
import warnings
from typing import Any, Callable

from aws_lambda_powertools.metrics.base import MetricManager
@@ -82,7 +83,12 @@ def clear_metrics(self):
        self.metric_set.clear()
        self.dimension_set.clear()

    def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False):
    def log_metrics(
        self,
        lambda_handler: Callable[[Any, Any], Any] = None,
        capture_cold_start_metric: bool = False,
        raise_on_empty_metrics: bool = False,
    ):
"""Decorator to serialize and publish metrics at the end of a function execution.

        Be aware that log_metrics **does call** the decorated function (e.g. lambda_handler).
@@ -102,6 +108,10 @@ def handler(event, context)
        ----------
        lambda_handler : Callable[[Any, Any], Any], optional
            Lambda function handler, by default None
        capture_cold_start_metric : bool, optional
            Captures cold start metric, by default False
        raise_on_empty_metrics : bool, optional
            Raise exception if no metrics are emitted, by default False

        Raises
        ------
@@ -113,7 +123,11 @@ def handler(event, context)
        # Return a partial function with args filled
        if lambda_handler is None:
            logger.debug("Decorator called with parameters")
            return functools.partial(self.log_metrics, capture_cold_start_metric=capture_cold_start_metric)
            return functools.partial(
                self.log_metrics,
                capture_cold_start_metric=capture_cold_start_metric,
                raise_on_empty_metrics=raise_on_empty_metrics,
            )

        @functools.wraps(lambda_handler)
        def decorate(event, context):
@@ -122,10 +136,13 @@ def decorate(event, context):
                if capture_cold_start_metric:
                    self.__add_cold_start_metric(context=context)
            finally:
                metrics = self.serialize_metric_set()
                self.clear_metrics()
                logger.debug("Publishing metrics", {"metrics": metrics})
                print(json.dumps(metrics))
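                # With an empty metric set, warn and skip publishing by default; when
                # raise_on_empty_metrics is set, fall through so serialize_metric_set()
                # raises SchemaValidationError for the empty set (see the tests below)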
                if not raise_on_empty_metrics and not self.metric_set:
                    warnings.warn("No metrics to publish, skipping")
                else:
                    metrics = self.serialize_metric_set()
                    self.clear_metrics()
                    logger.debug("Publishing metrics", {"metrics": metrics})
                    print(json.dumps(metrics))

            return response

26 changes: 23 additions & 3 deletions docs/content/core/metrics.mdx
@@ -103,15 +103,31 @@ def lambda_handler(evt, ctx):
    ...
```

`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if any of the following criteria is met, `SchemaValidationError` exception will be raised:
`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided, a warning will be logged but no exception will be raised.

* At least one Metric and Dimension
If metrics are provided and any of the following criteria are not met, a `SchemaValidationError` exception will be raised (see the sketch after this list):

* Minimum of 1 dimension
* Maximum of 9 dimensions
* Exactly one Namespace is set
* Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)
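
As a quick illustration, here is a minimal sketch of a handler that fails the namespace check (the metric name and value are hypothetical, and it assumes no namespace is configured elsewhere, e.g. via environment variable):

```python:title=schema_validation_example.py
from aws_lambda_powertools.metrics import Metrics, MetricUnit

metrics = Metrics()  # assumption: no namespace configured anywhere else

@metrics.log_metrics
def lambda_handler(evt, ctx):
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
    # the decorator flushes on return; with no namespace set,
    # validation raises SchemaValidationError
```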

If you want to ensure that at least one metric is emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator:

```python:title=lambda_handler.py
from aws_lambda_powertools.metrics import Metrics

metrics = Metrics()

@metrics.log_metrics(raise_on_empty_metrics=True) # highlight-line
def lambda_handler(evt, ctx):
    ...
```
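
With this parameter set, flushing an empty metric set raises `SchemaValidationError` instead of logging the warning described above.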

<Note type="info">
If you expect your function to execute without publishing metrics every time, you can suppress the warning with <strong><code>warnings.filterwarnings("ignore", "No metrics to publish*")</code></strong>.
</Note><br/>
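
A minimal sketch of that suppression, placed before the handler is invoked (the file name is illustrative):

```python:title=suppress_no_metrics_warning.py
import warnings

# Suppress the "No metrics to publish, skipping" UserWarning emitted by log_metrics
warnings.filterwarnings("ignore", "No metrics to publish*")
```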

<Note type="warning">
When nesting multiple middlwares, you should use <strong><code>log_metrics</code> as your last decorator wrapping all subsequent ones</strong>.
When nesting multiple middlewares, you should use <strong><code>log_metrics</code> as your last decorator wrapping all subsequent ones</strong>.
</Note><br/>

```python:title=lambda_handler_nested_middlewares.py
@@ -133,6 +149,10 @@ def lambda_handler(evt, ctx):

If you prefer not to use `log_metrics` because you might want to encapsulate additional logic when doing so, you can manually flush and clear metrics as follows:

<Note type="warning">
Metrics, dimensions and namespace validation still applies.
</Note><br/>

```python:title=manual_metric_serialization.py
import json
from aws_lambda_powertools.metrics import Metrics, MetricUnit
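
# The rest of this example is collapsed in the diff view; what follows is a
# minimal sketch of manual flushing, built from the serialize_metric_set() and
# clear_metrics() methods shown above (namespace/service values illustrative)
metrics = Metrics(namespace="ExampleApplication", service="booking")

def lambda_handler(evt, ctx):
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
    your_metrics_object = metrics.serialize_metric_set()
    metrics.clear_metrics()
    print(json.dumps(your_metrics_object))
```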
23 changes: 20 additions & 3 deletions tests/functional/test_metrics.py
@@ -1,4 +1,5 @@
import json
import warnings
from collections import namedtuple
from typing import Any, Dict, List

@@ -351,13 +352,13 @@ def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    @my_metrics.log_metrics
    @my_metrics.log_metrics(raise_on_empty_metrics=True)
    def lambda_handler(evt, context):
        # WHEN log_metrics is used despite having no metrics
        # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
        # and the function decorated also raised an exception
        raise ValueError("Bubble up")

    # THEN we should first raise SchemaValidationError as the main exception
    # THEN the raised exception should be SchemaValidationError
    with pytest.raises(SchemaValidationError):
        lambda_handler({}, {})

@@ -633,3 +634,19 @@ def lambda_handler(evt, context):
assert "ColdStart" not in output

assert "function_name" not in output


def test_log_metrics_decorator_no_metrics(dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace["name"], service="test_service")

    # WHEN using the log_metrics decorator and no metrics have been added
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        pass

    # THEN it should emit a warning instead of raising an exception
    with warnings.catch_warnings(record=True) as w:
        lambda_handler({}, {})
        assert len(w) == 1
        assert str(w[-1].message) == "No metrics to publish, skipping"