Skip to content

feat: improve error handling for log_metrics decorator #71

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Jun 9, 2020
28 changes: 22 additions & 6 deletions aws_lambda_powertools/metrics/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,12 @@ def clear_metrics(self):
self.metric_set.clear()
self.dimension_set.clear()

def log_metrics(self, lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False):
def log_metrics(
self,
lambda_handler: Callable[[Any, Any], Any] = None,
capture_cold_start_metric: bool = False,
raise_on_empty_metrics: bool = False,
):
"""Decorator to serialize and publish metrics at the end of a function execution.

Be aware that the log_metrics **does call** the decorated function (e.g. lambda_handler).
Expand All @@ -102,6 +107,10 @@ def handler(event, context)
----------
lambda_handler : Callable[[Any, Any], Any], optional
Lambda function handler, by default None
capture_cold_start_metric : bool, optional
Captures cold start metric, by default False
raise_on_empty_metrics : bool, optional
Raise exception if no metrics are emitted, by default False

Raises
------
Expand All @@ -113,7 +122,11 @@ def handler(event, context)
# Return a partial function with args filled
if lambda_handler is None:
logger.debug("Decorator called with parameters")
return functools.partial(self.log_metrics, capture_cold_start_metric=capture_cold_start_metric)
return functools.partial(
self.log_metrics,
capture_cold_start_metric=capture_cold_start_metric,
raise_on_empty_metrics=raise_on_empty_metrics,
)

@functools.wraps(lambda_handler)
def decorate(event, context):
Expand All @@ -122,10 +135,13 @@ def decorate(event, context):
if capture_cold_start_metric:
self.__add_cold_start_metric(context=context)
finally:
metrics = self.serialize_metric_set()
self.clear_metrics()
logger.debug("Publishing metrics", {"metrics": metrics})
print(json.dumps(metrics))
if not raise_on_empty_metrics and not self.metric_set:
logger.debug("No metrics to publish, skipping")
else:
metrics = self.serialize_metric_set()
self.clear_metrics()
logger.debug("Publishing metrics", {"metrics": metrics})
print(json.dumps(metrics))

return response

Expand Down
8 changes: 5 additions & 3 deletions docs/content/core/metrics.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -103,15 +103,17 @@ def lambda_handler(evt, ctx):
...
```

`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if any of the following criteria is met, `SchemaValidationError` exception will be raised:
`log_metrics` decorator **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then no exception will be raised. If metrics are provided, and any of the following criteria are not met, `SchemaValidationError` exception will be raised:

* At least one Metric and one Dimension
* Minimum of 1 dimension
* Maximum of 9 dimensions
* Namespace is set, and no more than one
* Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html)

If you want to ensure that at least 1 metric is emitted, you can pass `raise_on_empty_metrics` to the `log_metrics` decorator: `@metrics.log_metrics(raise_on_empty_metrics=True)`

<Note type="warning">
When nesting multiple middlwares, you should use <strong><code>log_metrics</code> as your last decorator wrapping all subsequent ones</strong>.
When nesting multiple middlewares, you should use <strong><code>log_metrics</code> as your last decorator wrapping all subsequent ones</strong>.
</Note><br/>

```python:title=lambda_handler_nested_middlewares.py
Expand Down
22 changes: 19 additions & 3 deletions tests/functional/test_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,13 +351,13 @@ def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace):
# GIVEN Metrics is initialized
my_metrics = Metrics()

@my_metrics.log_metrics
@my_metrics.log_metrics(raise_on_empty_metrics=True)
def lambda_handler(evt, context):
# WHEN log_metrics is used despite having no metrics
# WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics
# and the function decorated also raised an exception
raise ValueError("Bubble up")

# THEN we should first raise SchemaValidationError as the main exception
    # THEN the raised exception should be SchemaValidationError
with pytest.raises(SchemaValidationError):
lambda_handler({}, {})

Expand Down Expand Up @@ -633,3 +633,19 @@ def lambda_handler(evt, context):
assert "ColdStart" not in output

assert "function_name" not in output


def test_log_metrics_decorator_no_metrics(capsys, dimensions, namespace):
    """An empty metric set must be skipped silently: no exception, no output."""
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace["name"], service="test_service")

    # WHEN using the log_metrics decorator and no metrics have been added
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        pass

    fake_context = namedtuple("LambdaContext", "function_name")("example_fn")
    lambda_handler({}, fake_context)

    # THEN it should not throw an exception, and should not log anything
    captured_output = capsys.readouterr()
    assert captured_output.out == ""