From 0cec3d265d308cbde9270d890f670bfeeca88526 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 8 Feb 2023 22:18:55 +0000 Subject: [PATCH 01/11] feat(metrics) - Adding high resolution metrics --- aws_lambda_powertools/metrics/__init__.py | 11 +- aws_lambda_powertools/metrics/base.py | 109 +++++++++++++++++--- aws_lambda_powertools/metrics/exceptions.py | 6 ++ aws_lambda_powertools/metrics/metrics.py | 4 +- tests/e2e/metrics/test_metrics.py | 20 ++++ 5 files changed, 134 insertions(+), 16 deletions(-) diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index 3315899da0b..5f30f14102d 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -1,7 +1,12 @@ """CloudWatch Embedded Metric Format utility """ -from .base import MetricUnit -from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError +from .base import MetricResolution, MetricUnit +from .exceptions import ( + MetricResolutionError, + MetricUnitError, + MetricValueError, + SchemaValidationError, +) from .metric import single_metric from .metrics import EphemeralMetrics, Metrics @@ -11,6 +16,8 @@ "single_metric", "MetricUnit", "MetricUnitError", + "MetricResolution", + "MetricResolutionError", "SchemaValidationError", "MetricValueError", ] diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index c2949ab43da..59ddda09b9c 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -12,7 +12,12 @@ from ..shared import constants from ..shared.functions import resolve_env_var_choice -from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError +from .exceptions import ( + MetricResolutionError, + MetricUnitError, + MetricValueError, + SchemaValidationError, +) logger = logging.getLogger(__name__) @@ -22,6 +27,11 @@ is_cold_start = True +class MetricResolution(Enum): + Standard = 60 + High = 1 + + 
class MetricUnit(Enum): Seconds = "Seconds" Microseconds = "Microseconds" @@ -72,7 +82,9 @@ class MetricManager: Raises ------ MetricUnitError - When metric metric isn't supported by CloudWatch + When metric unit isn't supported by CloudWatch + MetricResolutionError + When metric resolution isn't supported by CloudWatch MetricValueError When metric value isn't a number SchemaValidationError @@ -94,8 +106,16 @@ def __init__( self.metadata_set = metadata_set if metadata_set is not None else {} self._metric_units = [unit.value for unit in MetricUnit] self._metric_unit_options = list(MetricUnit.__members__) + self._metric_resolutions = [resolution.value for resolution in MetricResolution] + self._metric_resolution_options = list(MetricResolution.__members__) - def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> None: + def add_metric( + self, + name: str, + unit: Union[MetricUnit, str], + value: float, + resolution: Union[MetricResolution, int] = 60, + ) -> None: """Adds given metric Example @@ -108,6 +128,10 @@ def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> N metric.add_metric(name="BookingConfirmation", unit="Count", value=1) + **Add given metric with MetricResolution non default value** + + metric.add_metric(name="BookingConfirmation", unit="Count", value=1, resolution=MetricResolution.High) + Parameters ---------- name : str @@ -116,18 +140,24 @@ def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> N `aws_lambda_powertools.helper.models.MetricUnit` value : float Metric value + resolution : Union[MetricResolution, int] + `aws_lambda_powertools.helper.models.MetricResolution` Raises ------ MetricUnitError When metric unit is not supported by CloudWatch + MetricResolutionError + When metric resolution is not supported by CloudWatch """ if not isinstance(value, numbers.Number): raise MetricValueError(f"{value} is not a valid number") unit = self._extract_metric_unit_value(unit=unit) + 
resolution = self._extract_metric_resolution_value(resolution=resolution) metric: Dict = self.metric_set.get(name, defaultdict(list)) metric["Unit"] = unit + metric["StorageResolution"] = resolution metric["Value"].append(float(value)) logger.debug(f"Adding metric: {name} with {metric}") self.metric_set[name] = metric @@ -194,15 +224,20 @@ def serialize_metric_set( logger.debug({"details": "Serializing metrics", "metrics": metrics, "dimensions": dimensions}) - metric_names_and_units: List[Dict[str, str]] = [] # [ { "Name": "metric_name", "Unit": "Count" } ] + metric_names_and_units_and_resolution: List[ + Dict[str, Union[str, int]] + ] = [] # [ { "Name": "metric_name", "Unit": "Count", "StorageResolution": 60 } ] metric_names_and_values: Dict[str, float] = {} # { "metric_name": 1.0 } for metric_name in metrics: metric: dict = metrics[metric_name] metric_value: int = metric.get("Value", 0) metric_unit: str = metric.get("Unit", "") + metric_resolution: int = metric.get("StorageResolution", 60) - metric_names_and_units.append({"Name": metric_name, "Unit": metric_unit}) + metric_names_and_units_and_resolution.append( + {"Name": metric_name, "Unit": metric_unit, "StorageResolution": metric_resolution} + ) metric_names_and_values.update({metric_name: metric_value}) return { @@ -212,7 +247,7 @@ def serialize_metric_set( { "Namespace": self.namespace, # "test_namespace" "Dimensions": [list(dimensions.keys())], # [ "service" ] - "Metrics": metric_names_and_units, + "Metrics": metric_names_and_units_and_resolution, } ], }, @@ -358,6 +393,39 @@ def decorate(event, context): return decorate + def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: + """Return metric value from metric unit whether that's str or MetricResolution enum + + Parameters + ---------- + unit : Union[int, MetricResolution] + Metric resolution + + Returns + ------- + int + Metric resolution value must be 1 or 60 + + Raises + ------ + MetricResolutionError + When 
metric resolution is not supported by CloudWatch + """ + + if isinstance(resolution, int): + if resolution in self._metric_resolution_options: + resolution = MetricResolution[str(resolution)].value + + if resolution not in self._metric_resolutions: + raise MetricResolutionError( + f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolution_options}" # noqa: E501 + ) + + if isinstance(resolution, MetricResolution): + resolution = resolution.value + + return resolution + def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: """Return metric value from metric unit whether that's str or MetricUnit enum @@ -429,10 +497,10 @@ class SingleMetric(MetricManager): **Creates cold start metric with function_version as dimension** import json - from aws_lambda_powertools.metrics import single_metric, MetricUnit + from aws_lambda_powertools.metrics import single_metric, MetricUnit, MetricResolution metric = single_metric(namespace="ServerlessAirline") - metric.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) + metric.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard) metric.add_dimension(name="function_version", value=47) print(json.dumps(metric.serialize_metric_set(), indent=4)) @@ -443,7 +511,13 @@ class SingleMetric(MetricManager): Inherits from `aws_lambda_powertools.metrics.base.MetricManager` """ - def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> None: + def add_metric( + self, + name: str, + unit: Union[MetricUnit, str], + value: float, + resolution: Optional[Union[MetricResolution, int]] = 60, + ) -> None: """Method to prevent more than one metric being created Parameters @@ -454,11 +528,13 @@ def add_metric(self, name: str, unit: Union[MetricUnit, str], value: float) -> N Metric unit (e.g. "Seconds", MetricUnit.Seconds) value : float Metric value + resolution : MetricResolution + Metric resolution (e.g. 
60, MetricResolution.Standard) """ if len(self.metric_set) > 0: logger.debug(f"Metric {name} already set, skipping...") return - return super().add_metric(name, unit, value) + return super().add_metric(name, unit, value, resolution) @contextmanager @@ -466,6 +542,7 @@ def single_metric( name: str, unit: MetricUnit, value: float, + resolution: Optional[Union[MetricResolution, int]] = 60, namespace: Optional[str] = None, default_dimensions: Optional[Dict[str, str]] = None, ) -> Generator[SingleMetric, None, None]: @@ -477,8 +554,9 @@ def single_metric( from aws_lambda_powertools import single_metric from aws_lambda_powertools.metrics import MetricUnit + from aws_lambda_powertools.metrics import MetricResolution - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ServerlessAirline") as metric: + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard, namespace="ServerlessAirline") as metric: # noqa E501 metric.add_dimension(name="function_version", value="47") **Same as above but set namespace using environment variable** @@ -487,8 +565,9 @@ def single_metric( from aws_lambda_powertools import single_metric from aws_lambda_powertools.metrics import MetricUnit + from aws_lambda_powertools.metrics import MetricResolution - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1) as metric: + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard) as metric: # noqa E501 metric.add_dimension(name="function_version", value="47") Parameters @@ -497,6 +576,8 @@ def single_metric( Metric name unit : MetricUnit `aws_lambda_powertools.helper.models.MetricUnit` + resolution : MetricResolution + `aws_lambda_powertools.helper.models.MetricResolution` value : float Metric value namespace: str @@ -511,6 +592,8 @@ def single_metric( ------ MetricUnitError When metric metric isn't supported by CloudWatch + MetricResolutionError + When metric 
resolution isn't supported by CloudWatch MetricValueError When metric value isn't a number SchemaValidationError @@ -519,7 +602,7 @@ def single_metric( metric_set: Optional[Dict] = None try: metric: SingleMetric = SingleMetric(namespace=namespace) - metric.add_metric(name=name, unit=unit, value=value) + metric.add_metric(name=name, unit=unit, value=value, resolution=resolution) if default_dimensions: for dim_name, dim_value in default_dimensions.items(): diff --git a/aws_lambda_powertools/metrics/exceptions.py b/aws_lambda_powertools/metrics/exceptions.py index 0376c55a40e..94f492d14d7 100644 --- a/aws_lambda_powertools/metrics/exceptions.py +++ b/aws_lambda_powertools/metrics/exceptions.py @@ -4,6 +4,12 @@ class MetricUnitError(Exception): pass +class MetricResolutionError(Exception): + """When metric resolution is not supported by CloudWatch""" + + pass + + class SchemaValidationError(Exception): """When serialization fail schema validation""" diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 43a45ff885d..085ebf9053f 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -50,7 +50,9 @@ def lambda_handler(): Raises ------ MetricUnitError - When metric metric isn't supported by CloudWatch + When metric unit isn't supported by CloudWatch + MetricResolutionError + When metric resolution isn't supported by CloudWatch MetricValueError When metric value isn't a number SchemaValidationError diff --git a/tests/e2e/metrics/test_metrics.py b/tests/e2e/metrics/test_metrics.py index 192cbcc25af..a76cff03068 100644 --- a/tests/e2e/metrics/test_metrics.py +++ b/tests/e2e/metrics/test_metrics.py @@ -48,6 +48,26 @@ def test_basic_lambda_metric_is_visible(basic_handler_fn: str, basic_handler_fn_ assert metric_values == [3.0] +@pytest.mark.xdist_group(name="metrics") +def test_metric_with_high_resolution(basic_handler_fn: str, basic_handler_fn_arn: str): + # GIVEN + metric_name = 
data_builder.build_metric_name() + service = data_builder.build_service_name() + dimensions = data_builder.build_add_dimensions_input(service=service) + metrics = data_builder.build_multiple_add_metric_input(metric_name=metric_name, value=1, quantity=3, resolution=1) + + # WHEN + event = json.dumps({"metrics": metrics, "service": service, "namespace": METRIC_NAMESPACE}) + _, execution_time = data_fetcher.get_lambda_response(lambda_arn=basic_handler_fn_arn, payload=event) + + metric_values = data_fetcher.get_metrics( + namespace=METRIC_NAMESPACE, start_date=execution_time, metric_name=metric_name, dimensions=dimensions + ) + + # THEN + assert metric_values == [3.0] + + @pytest.mark.xdist_group(name="metrics") def test_cold_start_metric(cold_start_fn_arn: str, cold_start_fn: str): # GIVEN From 991be42bdd3153647ad5997d0e7fe88dd84c8a86 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 8 Feb 2023 22:51:28 +0000 Subject: [PATCH 02/11] feat(metrics) - Adding high resolution metrics - tests --- tests/functional/test_metrics.py | 37 +++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index d15b105057e..dc38968886a 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -1,13 +1,15 @@ import json import warnings from collections import namedtuple -from typing import Any, Dict, List +from typing import Any, Dict, List, Union import pytest from aws_lambda_powertools import Metrics, single_metric from aws_lambda_powertools.metrics import ( EphemeralMetrics, + MetricResolution, + MetricResolutionError, MetricUnit, MetricUnitError, MetricValueError, @@ -30,23 +32,23 @@ def reset_metric_set(): @pytest.fixture -def metric() -> Dict[str, str]: - return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1} +def metric() -> Dict[str, Union[str, int]]: + return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1, 
"resolution": MetricResolution.Standard} @pytest.fixture -def metrics() -> List[Dict[str, str]]: +def metrics() -> List[Dict[str, Union[str, int]]]: return [ - {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, - {"name": "metric_two", "unit": MetricUnit.Count, "value": 1}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard}, + {"name": "metric_two", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard}, ] @pytest.fixture -def metrics_same_name() -> List[Dict[str, str]]: +def metrics_same_name() -> List[Dict[str, Union[str, int]]]: return [ - {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, - {"name": "metric_one", "unit": MetricUnit.Count, "value": 5}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 5, "resolution": MetricResolution.Standard}, ] @@ -161,7 +163,7 @@ def test_single_metric_default_dimensions(capsys, metric, dimension, namespace): # WHEN using single_metric context manager default_dimensions = {dimension["name"]: dimension["value"]} with single_metric(namespace=namespace, default_dimensions=default_dimensions, **metric) as my_metric: - my_metric.add_metric(name="second_metric", unit="Count", value=1) + my_metric.add_metric(name="second_metric", unit="Count", value=1, resolution=60) output = capture_metrics_output(capsys) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) @@ -354,6 +356,17 @@ def test_schema_validation_incorrect_metric_unit(metric, dimension, namespace): my_metric.add_dimension(**dimension) +def test_schema_validation_incorrect_metric_resolution(metric, dimension, namespace): + # GIVEN we pass a metric resolution that is not supported by CloudWatch + metric["resolution"] = 10 # metric resolution must be 1 (High) or 60 (Standard) + + # WHEN we try adding a new metric + # THEN it 
should fail metric unit validation + with pytest.raises(MetricResolutionError): + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + + def test_schema_validation_no_namespace(metric, dimension): # GIVEN we don't add any namespace # WHEN we attempt to serialize a valid EMF object @@ -758,7 +771,7 @@ def test_serialize_metric_set_metric_definition(metric, dimension, namespace, se { "Namespace": "test_namespace", "Dimensions": [["test_dimension", "service"]], - "Metrics": [{"Name": "single_metric", "Unit": "Count"}], + "Metrics": [{"Name": "single_metric", "Unit": "Count", "StorageResolution": 60}], } ], }, @@ -852,7 +865,7 @@ def test_serialize_metric_set_metric_definition_multiple_values( { "Namespace": "test_namespace", "Dimensions": [["test_dimension", "service"]], - "Metrics": [{"Name": "metric_one", "Unit": "Count"}], + "Metrics": [{"Name": "metric_one", "Unit": "Count", "StorageResolution": 60}], } ], }, From dda29b9c79e56479a2f68addad3fa01e47121d24 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 8 Feb 2023 23:00:58 +0000 Subject: [PATCH 03/11] feat(metrics) - Adding high resolution metrics - docs --- docs/core/metrics.md | 13 +++++++++++++ examples/metrics/src/add_high_resolution_metric.py | 10 ++++++++++ 2 files changed, 23 insertions(+) create mode 100644 examples/metrics/src/add_high_resolution_metric.py diff --git a/docs/core/metrics.md b/docs/core/metrics.md index ca42b632f84..745b9aac479 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -78,6 +78,19 @@ You can create metrics using `add_metric`, and you can create dimensions for all ???+ warning "Warning: Do not create metrics or dimensions outside the handler" Metrics or dimensions added in the global scope will only be added during cold start. Disregard if you that's the intended behavior. 
+### Adding high-resolution metrics + +You can create [high-resolution metrics](https://aws.amazon.com/pt/about-aws/whats-new/2023/02/amazon-cloudwatch-high-resolution-metric-extraction-structured-logs/) passing `resolution` parameter to `add_metric`. + +=== "add_high_resolution_metrics.py" + + ```python hl_lines="14-15" + --8<-- "examples/metrics/src/add_high_resolution_metric.py" + ``` + +???+ tip "Tip: Autocomplete Metric Resolutions" + `MetricResolution` enum facilitates finding a supported metric resolution by CloudWatch. Alternatively, you can pass the values 1 or 60 (must be one of them) as an integer _e.g. `resolution=1`_. + ### Adding multi-value metrics You can call `add_metric()` with the same metric name multiple times. The values will be grouped together in a list. diff --git a/examples/metrics/src/add_high_resolution_metric.py b/examples/metrics/src/add_high_resolution_metric.py new file mode 100644 index 00000000000..633fb114231 --- /dev/null +++ b/examples/metrics/src/add_high_resolution_metric.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricResolution, MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1, resolution=MetricResolution.High) From deaab2ef776c095186429b4c595c099d414f5628 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 8 Feb 2023 23:04:26 +0000 Subject: [PATCH 04/11] feat(metrics) - Adding high resolution metrics - docs --- aws_lambda_powertools/metrics/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index 59ddda09b9c..c0c0fa2a2f8 100644 --- a/aws_lambda_powertools/metrics/base.py +++ 
b/aws_lambda_powertools/metrics/base.py @@ -542,7 +542,7 @@ def single_metric( name: str, unit: MetricUnit, value: float, - resolution: Optional[Union[MetricResolution, int]] = 60, + resolution: Union[MetricResolution, int] = 60, namespace: Optional[str] = None, default_dimensions: Optional[Dict[str, str]] = None, ) -> Generator[SingleMetric, None, None]: From 85753a494fb8ff490e6b5c1e3ec53b074882b590 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 8 Feb 2023 23:05:50 +0000 Subject: [PATCH 05/11] feat(metrics) - Adding high resolution metrics - fix mypy errors --- aws_lambda_powertools/metrics/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index c0c0fa2a2f8..31ae3fd022a 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -516,7 +516,7 @@ def add_metric( name: str, unit: Union[MetricUnit, str], value: float, - resolution: Optional[Union[MetricResolution, int]] = 60, + resolution: Union[MetricResolution, int] = 60, ) -> None: """Method to prevent more than one metric being created From 40ce2ac602ece8ee67d24324df22717df3784ff4 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 8 Feb 2023 23:12:24 +0000 Subject: [PATCH 06/11] feat(metrics) - Adding high resolution metrics - revert e2e wrong changes --- tests/e2e/metrics/test_metrics.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/tests/e2e/metrics/test_metrics.py b/tests/e2e/metrics/test_metrics.py index a76cff03068..192cbcc25af 100644 --- a/tests/e2e/metrics/test_metrics.py +++ b/tests/e2e/metrics/test_metrics.py @@ -48,26 +48,6 @@ def test_basic_lambda_metric_is_visible(basic_handler_fn: str, basic_handler_fn_ assert metric_values == [3.0] -@pytest.mark.xdist_group(name="metrics") -def test_metric_with_high_resolution(basic_handler_fn: str, basic_handler_fn_arn: str): - # GIVEN - metric_name = 
data_builder.build_metric_name() - service = data_builder.build_service_name() - dimensions = data_builder.build_add_dimensions_input(service=service) - metrics = data_builder.build_multiple_add_metric_input(metric_name=metric_name, value=1, quantity=3, resolution=1) - - # WHEN - event = json.dumps({"metrics": metrics, "service": service, "namespace": METRIC_NAMESPACE}) - _, execution_time = data_fetcher.get_lambda_response(lambda_arn=basic_handler_fn_arn, payload=event) - - metric_values = data_fetcher.get_metrics( - namespace=METRIC_NAMESPACE, start_date=execution_time, metric_name=metric_name, dimensions=dimensions - ) - - # THEN - assert metric_values == [3.0] - - @pytest.mark.xdist_group(name="metrics") def test_cold_start_metric(cold_start_fn_arn: str, cold_start_fn: str): # GIVEN From 0e3f933f0d33cb211d612204cf3af693eebc64e5 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 9 Feb 2023 21:02:42 +0000 Subject: [PATCH 07/11] feat(metrics) - Addressing Heito's feedbacks --- aws_lambda_powertools/metrics/base.py | 35 ++++++---- aws_lambda_powertools/metrics/types.py | 7 ++ docs/core/metrics.md | 8 ++- tests/functional/test_metrics.py | 93 ++++++++++++++++++++------ 4 files changed, 110 insertions(+), 33 deletions(-) create mode 100644 aws_lambda_powertools/metrics/types.py diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index 31ae3fd022a..f3f8c0e09a5 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -18,6 +18,7 @@ MetricValueError, SchemaValidationError, ) +from .types import MetricNameUnitResolution logger = logging.getLogger(__name__) @@ -105,9 +106,9 @@ def __init__( self.service = resolve_env_var_choice(choice=service, env=os.getenv(constants.SERVICE_NAME_ENV)) self.metadata_set = metadata_set if metadata_set is not None else {} self._metric_units = [unit.value for unit in MetricUnit] - self._metric_unit_options = list(MetricUnit.__members__) + 
self._metric_unit_valid_options = list(MetricUnit.__members__) self._metric_resolutions = [resolution.value for resolution in MetricResolution] - self._metric_resolution_options = list(MetricResolution.__members__) + self._metric_resolution_valid_options = list(MetricResolution.__members__) def add_metric( self, @@ -224,9 +225,12 @@ def serialize_metric_set( logger.debug({"details": "Serializing metrics", "metrics": metrics, "dimensions": dimensions}) - metric_names_and_units_and_resolution: List[ - Dict[str, Union[str, int]] - ] = [] # [ { "Name": "metric_name", "Unit": "Count", "StorageResolution": 60 } ] + # For standard resolution metrics, don't add StorageResolution field to avoid unnecessary ingestion of data into cloudwatch # noqa E501 + # Example: [ { "Name": "metric_name", "Unit": "Count"} ] # noqa E800 + # + # In case using high-resolution metrics, add StorageResolution field + # Example: [ { "Name": "metric_name", "Unit": "Count", "StorageResolution": 1 } ] # noqa E800 + metric_definition: List[MetricNameUnitResolution] = [] metric_names_and_values: Dict[str, float] = {} # { "metric_name": 1.0 } for metric_name in metrics: @@ -235,9 +239,14 @@ def serialize_metric_set( metric_unit: str = metric.get("Unit", "") metric_resolution: int = metric.get("StorageResolution", 60) - metric_names_and_units_and_resolution.append( - {"Name": metric_name, "Unit": metric_unit, "StorageResolution": metric_resolution} - ) + metric_definition_data: MetricNameUnitResolution = {"Name": metric_name, "Unit": metric_unit} + + # high-resolution metrics + if metric_resolution == 1: + metric_definition_data["StorageResolution"] = metric_resolution + + metric_definition.append(metric_definition_data) + metric_names_and_values.update({metric_name: metric_value}) return { @@ -247,7 +256,7 @@ def serialize_metric_set( { "Namespace": self.namespace, # "test_namespace" "Dimensions": [list(dimensions.keys())], # [ "service" ] - "Metrics": metric_names_and_units_and_resolution, + 
"Metrics": metric_definition, } ], }, @@ -413,12 +422,12 @@ def _extract_metric_resolution_value(self, resolution: Union[int, MetricResoluti """ if isinstance(resolution, int): - if resolution in self._metric_resolution_options: + if resolution in self._metric_resolution_valid_options: resolution = MetricResolution[str(resolution)].value if resolution not in self._metric_resolutions: raise MetricResolutionError( - f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolution_options}" # noqa: E501 + f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolution_valid_options}" # noqa: E501 ) if isinstance(resolution, MetricResolution): @@ -446,12 +455,12 @@ def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: """ if isinstance(unit, str): - if unit in self._metric_unit_options: + if unit in self._metric_unit_valid_options: unit = MetricUnit[unit].value if unit not in self._metric_units: raise MetricUnitError( - f"Invalid metric unit '{unit}', expected either option: {self._metric_unit_options}" + f"Invalid metric unit '{unit}', expected either option: {self._metric_unit_valid_options}" ) if isinstance(unit, MetricUnit): diff --git a/aws_lambda_powertools/metrics/types.py b/aws_lambda_powertools/metrics/types.py new file mode 100644 index 00000000000..76fcf7bd18a --- /dev/null +++ b/aws_lambda_powertools/metrics/types.py @@ -0,0 +1,7 @@ +from typing_extensions import NotRequired, TypedDict + + +class MetricNameUnitResolution(TypedDict): + Name: str + Unit: str + StorageResolution: NotRequired[int] diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 745b9aac479..f4bf54cced8 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -20,6 +20,9 @@ If you're new to Amazon CloudWatch, there are two terminologies you must be awar * **Namespace**. 
It's the highest level container that will group multiple metrics from multiple services for a given application, for example `ServerlessEcommerce`. * **Dimensions**. Metrics metadata in key-value format. They help you slice and dice metrics visualization, for example `ColdStart` metric by Payment `service`. +* **Metric**. It's the name of the metric, for example: `SuccessfulBooking` or `UpdatedBooking`. +* **Unit**. It's a value representing the unit of measure for the corresponding metric, for example: `Count` or `Seconds`. +* **Resolution**. It's a value representing the storage resolution for the corresponding metric. Metrics can be either Standard or High resolution. Read more [here](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics).
@@ -82,9 +85,12 @@ You can create metrics using `add_metric`, and you can create dimensions for all You can create [high-resolution metrics](https://aws.amazon.com/pt/about-aws/whats-new/2023/02/amazon-cloudwatch-high-resolution-metric-extraction-structured-logs/) passing `resolution` parameter to `add_metric`. +???+ tip "High-resolution metrics - when is it useful?" + High-resolution metrics are data with a granularity of one second and are very useful in several situations such as telemetry, time series, real-time incident management, and others. + === "add_high_resolution_metrics.py" - ```python hl_lines="14-15" + ```python hl_lines="10" --8<-- "examples/metrics/src/add_high_resolution_metric.py" ``` diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index dc38968886a..cc453dce5fb 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -32,23 +32,28 @@ def reset_metric_set(): @pytest.fixture -def metric() -> Dict[str, Union[str, int]]: - return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard} +def metric_with_resolution() -> Dict[str, Union[str, int]]: + return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.High} @pytest.fixture -def metrics() -> List[Dict[str, Union[str, int]]]: +def metric() -> Dict[str, str]: + return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1} + + +@pytest.fixture +def metrics() -> List[Dict[str, str]]: return [ - {"name": "metric_one", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard}, - {"name": "metric_two", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, + {"name": "metric_two", "unit": MetricUnit.Count, "value": 1}, ] @pytest.fixture -def metrics_same_name() -> List[Dict[str, Union[str, int]]]: +def metrics_same_name() -> 
List[Dict[str, str]]: return [ - {"name": "metric_one", "unit": MetricUnit.Count, "value": 1, "resolution": MetricResolution.Standard}, - {"name": "metric_one", "unit": MetricUnit.Count, "value": 5, "resolution": MetricResolution.Standard}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 1}, + {"name": "metric_one", "unit": MetricUnit.Count, "value": 5}, ] @@ -143,6 +148,21 @@ def capture_metrics_output_multiple_emf_objects(capsys): return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line] +def test_single_metric_logs_one_metric_only_with_high_resolution(capsys, metric_with_resolution, dimension, namespace): + # GIVEN we try adding more than one metric + # WHEN using single_metric context manager + with single_metric(namespace=namespace, **metric_with_resolution) as my_metric: + my_metric.add_metric(name="second_metric", unit="Count", value=1, resolution=1) + my_metric.add_dimension(**dimension) + + output = capture_metrics_output(capsys) + expected = serialize_single_metric(metric=metric_with_resolution, dimension=dimension, namespace=namespace) + + # THEN we should only have the first metric added + remove_timestamp(metrics=[output, expected]) + assert expected == output + + def test_single_metric_logs_one_metric_only(capsys, metric, dimension, namespace): # GIVEN we try adding more than one metric # WHEN using single_metric context manager @@ -163,7 +183,7 @@ def test_single_metric_default_dimensions(capsys, metric, dimension, namespace): # WHEN using single_metric context manager default_dimensions = {dimension["name"]: dimension["value"]} with single_metric(namespace=namespace, default_dimensions=default_dimensions, **metric) as my_metric: - my_metric.add_metric(name="second_metric", unit="Count", value=1, resolution=60) + my_metric.add_metric(name="second_metric", unit="Count", value=1) output = capture_metrics_output(capsys) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) 
@@ -345,24 +365,24 @@ def lambda_handler(evt, context): assert lambda_handler({}, {}) is True -def test_schema_validation_incorrect_metric_unit(metric, dimension, namespace): - # GIVEN we pass a metric unit that is not supported by CloudWatch - metric["unit"] = "incorrect_unit" +def test_schema_validation_incorrect_metric_resolution(metric, dimension, namespace): + # GIVEN we pass a metric resolution that is not supported by CloudWatch + metric["resolution"] = 10 # metric resolution must be 1 (High) or 60 (Standard) # WHEN we try adding a new metric # THEN it should fail metric unit validation - with pytest.raises(MetricUnitError): + with pytest.raises(MetricResolutionError): with single_metric(**metric) as my_metric: my_metric.add_dimension(**dimension) -def test_schema_validation_incorrect_metric_resolution(metric, dimension, namespace): - # GIVEN we pass a metric resolution that is not supported by CloudWatch - metric["resolution"] = 10 # metric resolution must be 1 (High) or 60 (Standard) +def test_schema_validation_incorrect_metric_unit(metric, dimension, namespace): + # GIVEN we pass a metric unit that is not supported by CloudWatch + metric["unit"] = "incorrect_unit" # WHEN we try adding a new metric # THEN it should fail metric unit validation - with pytest.raises(MetricResolutionError): + with pytest.raises(MetricUnitError): with single_metric(**metric) as my_metric: my_metric.add_dimension(**dimension) @@ -762,6 +782,41 @@ def lambda_handler(evt, ctx): assert expected == output +def test_serialize_high_resolution_metric_set_metric_definition( + metric_with_resolution, dimension, namespace, service, metadata +): + expected_metric_definition = { + "single_metric": [1.0], + "_aws": { + "Timestamp": 1592237875494, + "CloudWatchMetrics": [ + { + "Namespace": "test_namespace", + "Dimensions": [["test_dimension", "service"]], + "Metrics": [{"Name": "single_metric", "Unit": "Count", "StorageResolution": 1}], + } + ], + }, + "service": "test_service", + 
"username": "test", + "test_dimension": "test", + } + + # GIVEN Metrics is initialized + my_metrics = Metrics(service=service, namespace=namespace) + my_metrics.add_metric(**metric_with_resolution) + my_metrics.add_dimension(**dimension) + my_metrics.add_metadata(**metadata) + + # WHEN metrics are serialized manually + metric_definition_output = my_metrics.serialize_metric_set() + + # THEN we should emit a valid embedded metric definition object + assert "Timestamp" in metric_definition_output["_aws"] + remove_timestamp(metrics=[metric_definition_output, expected_metric_definition]) + assert metric_definition_output == expected_metric_definition + + def test_serialize_metric_set_metric_definition(metric, dimension, namespace, service, metadata): expected_metric_definition = { "single_metric": [1.0], @@ -771,7 +826,7 @@ def test_serialize_metric_set_metric_definition(metric, dimension, namespace, se { "Namespace": "test_namespace", "Dimensions": [["test_dimension", "service"]], - "Metrics": [{"Name": "single_metric", "Unit": "Count", "StorageResolution": 60}], + "Metrics": [{"Name": "single_metric", "Unit": "Count"}], } ], }, @@ -865,7 +920,7 @@ def test_serialize_metric_set_metric_definition_multiple_values( { "Namespace": "test_namespace", "Dimensions": [["test_dimension", "service"]], - "Metrics": [{"Name": "metric_one", "Unit": "Count", "StorageResolution": 60}], + "Metrics": [{"Name": "metric_one", "Unit": "Count"}], } ], }, From 1e8974fda3daa2e2ed84ec106ec21f62cf6f2a28 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 10 Feb 2023 14:56:17 +0100 Subject: [PATCH 08/11] chore: cover additional logic branches on resolution --- aws_lambda_powertools/metrics/base.py | 15 ++++++--------- tests/functional/test_metrics.py | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index f3f8c0e09a5..4f00558ce10 100644 --- a/aws_lambda_powertools/metrics/base.py 
+++ b/aws_lambda_powertools/metrics/base.py @@ -421,17 +421,14 @@ def _extract_metric_resolution_value(self, resolution: Union[int, MetricResoluti When metric resolution is not supported by CloudWatch """ - if isinstance(resolution, int): - if resolution in self._metric_resolution_valid_options: - resolution = MetricResolution[str(resolution)].value - - if resolution not in self._metric_resolutions: - raise MetricResolutionError( - f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolution_valid_options}" # noqa: E501 - ) - if isinstance(resolution, MetricResolution): resolution = resolution.value + return resolution + + if isinstance(resolution, int) and resolution not in self._metric_resolutions: + raise MetricResolutionError( + f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolution_valid_options}" # noqa: E501 + ) return resolution diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index cc453dce5fb..07511f03244 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -163,6 +163,22 @@ def test_single_metric_logs_one_metric_only_with_high_resolution(capsys, metric_ assert expected == output +def test_single_metric_logs_with_high_resolution_integer(capsys, metric_with_resolution, dimension, namespace): + # GIVEN we have a metric with high resolution as integer + metric_with_resolution["resolution"] = MetricResolution.High.value + + # WHEN using single_metric context manager + with single_metric(namespace=namespace, **metric_with_resolution) as my_metric: + my_metric.add_dimension(**dimension) + + # THEN we should only have the first metric added + output = capture_metrics_output(capsys) + expected = serialize_single_metric(metric=metric_with_resolution, dimension=dimension, namespace=namespace) + + remove_timestamp(metrics=[output, expected]) + assert expected == output + + def test_single_metric_logs_one_metric_only(capsys, metric, 
dimension, namespace): # GIVEN we try adding more than one metric # WHEN using single_metric context manager From 64ff2d37779084731df799cbca93e0e639047bf8 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 10 Feb 2023 14:57:52 +0100 Subject: [PATCH 09/11] chore: cleanup test name for consistency --- tests/functional/test_metrics.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 07511f03244..057a8a16fba 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -148,17 +148,16 @@ def capture_metrics_output_multiple_emf_objects(capsys): return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line] -def test_single_metric_logs_one_metric_only_with_high_resolution(capsys, metric_with_resolution, dimension, namespace): - # GIVEN we try adding more than one metric +def test_single_metric_logs_with_high_resolution_enum(capsys, metric_with_resolution, dimension, namespace): + # GIVEN we have a metric with high resolution as enum # WHEN using single_metric context manager with single_metric(namespace=namespace, **metric_with_resolution) as my_metric: - my_metric.add_metric(name="second_metric", unit="Count", value=1, resolution=1) my_metric.add_dimension(**dimension) + # THEN we should only have the first metric added output = capture_metrics_output(capsys) expected = serialize_single_metric(metric=metric_with_resolution, dimension=dimension, namespace=namespace) - # THEN we should only have the first metric added remove_timestamp(metrics=[output, expected]) assert expected == output From fd2b4d9db44b245870168e480b191959ab7295bb Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 10 Feb 2023 15:02:33 +0100 Subject: [PATCH 10/11] fix: make resolution error actionable --- aws_lambda_powertools/metrics/base.py | 3 +-- tests/functional/test_metrics.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git 
a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index 4f00558ce10..ebb0ca7481b 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -108,7 +108,6 @@ def __init__( self._metric_units = [unit.value for unit in MetricUnit] self._metric_unit_valid_options = list(MetricUnit.__members__) self._metric_resolutions = [resolution.value for resolution in MetricResolution] - self._metric_resolution_valid_options = list(MetricResolution.__members__) def add_metric( self, @@ -427,7 +426,7 @@ def _extract_metric_resolution_value(self, resolution: Union[int, MetricResoluti if isinstance(resolution, int) and resolution not in self._metric_resolutions: raise MetricResolutionError( - f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolution_valid_options}" # noqa: E501 + f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolutions}" # noqa: E501 ) return resolution diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 057a8a16fba..85b4e497b4f 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -380,13 +380,13 @@ def lambda_handler(evt, context): assert lambda_handler({}, {}) is True -def test_schema_validation_incorrect_metric_resolution(metric, dimension, namespace): +def test_schema_validation_incorrect_metric_resolution(metric, dimension): # GIVEN we pass a metric resolution that is not supported by CloudWatch metric["resolution"] = 10 # metric resolution must be 1 (High) or 60 (Standard) # WHEN we try adding a new metric # THEN it should fail metric unit validation - with pytest.raises(MetricResolutionError): + with pytest.raises(MetricResolutionError, match="Invalid metric resolution.*60"): with single_metric(**metric) as my_metric: my_metric.add_dimension(**dimension) From c2bb45ba84e5f98794fb397f92901f15bb495b73 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 
10 Feb 2023 15:22:48 +0100 Subject: [PATCH 11/11] fix: raise resolution error for all invalid options --- aws_lambda_powertools/metrics/base.py | 14 ++++++-------- tests/functional/test_metrics.py | 12 ++++++++++++ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index ebb0ca7481b..67dcb47c282 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -419,17 +419,15 @@ def _extract_metric_resolution_value(self, resolution: Union[int, MetricResoluti MetricResolutionError When metric resolution is not supported by CloudWatch """ - if isinstance(resolution, MetricResolution): - resolution = resolution.value - return resolution + return resolution.value - if isinstance(resolution, int) and resolution not in self._metric_resolutions: - raise MetricResolutionError( - f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolutions}" # noqa: E501 - ) + if isinstance(resolution, int) and resolution in self._metric_resolutions: + return resolution - return resolution + raise MetricResolutionError( + f"Invalid metric resolution '{resolution}', expected either option: {self._metric_resolutions}" # noqa: E501 + ) def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: """Return metric value from metric unit whether that's str or MetricUnit enum diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 85b4e497b4f..2a53b42cd16 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -391,6 +391,18 @@ def test_schema_validation_incorrect_metric_resolution(metric, dimension): my_metric.add_dimension(**dimension) +@pytest.mark.parametrize("resolution", ["sixty", False, [], {}, object]) +def test_schema_validation_incorrect_metric_resolution_non_integer_enum(metric, dimension, resolution, namespace): + # GIVEN we pass a metric resolution that is 
not supported by CloudWatch
+    metric["resolution"] = resolution  # metric resolution must be 1 (High) or 60 (Standard)
+
+    # WHEN we try adding a new metric
+    # THEN it should fail metric resolution validation
+    with pytest.raises(MetricResolutionError, match="Invalid metric resolution.*60"):
+        with single_metric(namespace=namespace, **metric) as my_metric:
+            my_metric.add_dimension(**dimension)
+
+
 def test_schema_validation_incorrect_metric_unit(metric, dimension, namespace):
     # GIVEN we pass a metric unit that is not supported by CloudWatch
     metric["unit"] = "incorrect_unit"