diff --git a/CHANGELOG.md b/CHANGELOG.md
index f139fc3d21c..53af3f5b686 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]

 ### Added
+- **Metrics**: Support adding multiple metric values to a metric name
 - **Utilities**: Add new `Validator` utility to validate inbound events and responses using JSON Schema

 ## [1.5.0] - 2020-09-04
diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py
index bff0c84e03f..ef1ed85eb4b 100644
--- a/aws_lambda_powertools/metrics/base.py
+++ b/aws_lambda_powertools/metrics/base.py
@@ -4,6 +4,7 @@
 import numbers
 import os
 import pathlib
+from collections import defaultdict
 from enum import Enum
 from typing import Any, Dict, List, Union
@@ -93,7 +94,7 @@ def __init__(
         self._metric_unit_options = list(MetricUnit.__members__)
         self.metadata_set = self.metadata_set if metadata_set is not None else {}

-    def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):
+    def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float):
         """Adds given metric

         Example
@@ -110,9 +111,9 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):
         ----------
         name : str
             Metric name
-        unit : MetricUnit
+        unit : Union[MetricUnit, str]
             `aws_lambda_powertools.helper.models.MetricUnit`
-        value : Union[float, int]
+        value : float
             Metric value

         Raises
@@ -124,7 +125,9 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):
             raise MetricValueError(f"{value} is not a valid number")

         unit = self.__extract_metric_unit_value(unit=unit)
-        metric = {"Unit": unit, "Value": float(value)}
+        metric = self.metric_set.get(name, defaultdict(list))
+        metric["Unit"] = unit
+        metric["Value"].append(float(value))
         logger.debug(f"Adding metric: {name} with {metric}")
         self.metric_set[name] = metric
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py
index 4d092131988..a3d471ab305 100644
--- a/tests/functional/test_metrics.py
+++ b/tests/functional/test_metrics.py
@@ -32,6 +32,14 @@ def metrics() -> List[Dict[str, str]]:
     ]


+@pytest.fixture
+def metrics_same_name() -> List[Dict[str, str]]:
+    return [
+        {"name": "metric_one", "unit": MetricUnit.Count, "value": 1},
+        {"name": "metric_one", "unit": MetricUnit.Count, "value": 5},
+    ]
+
+
 @pytest.fixture
 def dimension() -> Dict[str, str]:
     return {"name": "test_dimension", "value": "test"}
@@ -485,7 +493,7 @@ def lambda_handler(evt, context):
     output = capture_metrics_output(capsys)

     # THEN ColdStart metric and function_name dimension should be logged
-    assert output["ColdStart"] == 1
+    assert output["ColdStart"] == [1.0]
     assert output["function_name"] == "example_fn"
@@ -607,7 +615,7 @@ def lambda_handler(evt, ctx):
 def test_serialize_metric_set_metric_definition(metric, dimension, namespace, service, metadata):
     expected_metric_definition = {
-        "single_metric": 1.0,
+        "single_metric": [1.0],
         "_aws": {
             "Timestamp": 1592237875494,
             "CloudWatchMetrics": [
@@ -655,7 +663,7 @@ def lambda_handler(evt, context):
     # THEN ColdStart metric and function_name dimension should be logged
     # in a separate EMF blob than the application metrics
-    assert cold_start_blob["ColdStart"] == 1
+    assert cold_start_blob["ColdStart"] == [1.0]
     assert cold_start_blob["function_name"] == "example_fn"
     assert cold_start_blob["service"] == service
@@ -669,5 +677,65 @@ def lambda_handler(evt, context):
     # and that application metrics are recorded as normal
     assert custom_metrics_blob["service"] == service
-    assert custom_metrics_blob["single_metric"] == metric["value"]
+    assert custom_metrics_blob["single_metric"] == [float(metric["value"])]
     assert custom_metrics_blob["test_dimension"] == dimension["value"]
+
+
+def test_log_multiple_metrics(capsys, metrics_same_name, dimensions, namespace):
+    # GIVEN Metrics is initialized
+    my_metrics = Metrics(namespace=namespace)
+
+    for dimension in dimensions:
+        my_metrics.add_dimension(**dimension)
+
+    # WHEN we utilize log_metrics to serialize
+    # and flush multiple metrics with the same name at the end of a function execution
+    @my_metrics.log_metrics
+    def lambda_handler(evt, ctx):
+        for metric in metrics_same_name:
+            my_metrics.add_metric(**metric)
+
+    lambda_handler({}, {})
+    output = capture_metrics_output(capsys)
+    expected = serialize_metrics(metrics=metrics_same_name, dimensions=dimensions, namespace=namespace)
+
+    # THEN we should have no exceptions
+    # and a valid EMF object should be flushed correctly
+    remove_timestamp(metrics=[output, expected])
+    assert expected == output
+
+
+def test_serialize_metric_set_metric_definition_multiple_values(
+    metrics_same_name, dimension, namespace, service, metadata
+):
+    expected_metric_definition = {
+        "metric_one": [1.0, 5.0],
+        "_aws": {
+            "Timestamp": 1592237875494,
+            "CloudWatchMetrics": [
+                {
+                    "Namespace": "test_namespace",
+                    "Dimensions": [["test_dimension", "service"]],
+                    "Metrics": [{"Name": "metric_one", "Unit": "Count"}],
+                }
+            ],
+        },
+        "service": "test_service",
+        "username": "test",
+        "test_dimension": "test",
+    }
+
+    # GIVEN Metrics is initialized and multiple metrics are added with the same name
+    my_metrics = Metrics(service=service, namespace=namespace)
+    for metric in metrics_same_name:
+        my_metrics.add_metric(**metric)
+    my_metrics.add_dimension(**dimension)
+    my_metrics.add_metadata(**metadata)
+
+    # WHEN metrics are serialized manually
+    metric_definition_output = my_metrics.serialize_metric_set()
+
+    # THEN we should emit a valid embedded metric definition object
+    assert "Timestamp" in metric_definition_output["_aws"]
+    remove_timestamp(metrics=[metric_definition_output, expected_metric_definition])
+    assert metric_definition_output == expected_metric_definition
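For reference, a minimal usage sketch (not part of the patch; the metric name `SuccessfulBooking`, namespace `ExampleApplication`, and `environment` dimension are made up) of the behaviour this change enables: calling `add_metric` repeatedly with the same name now appends each value instead of overwriting it, and the serialized EMF blob carries the values as a list.

```python
# Minimal sketch assuming aws-lambda-powertools with this patch applied.
# Names below are illustrative only.
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="ExampleApplication", service="booking")
metrics.add_dimension(name="environment", value="prod")

# Two calls with the same metric name no longer overwrite each other;
# both values are kept under the metric's "Value" list.
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=3)

# serialize_metric_set() is what the @metrics.log_metrics decorator flushes
# at the end of a handler invocation.
print(metrics.serialize_metric_set()["SuccessfulBooking"])  # [1.0, 3.0]
```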