From 54cf7d8a109bb647cdf87cec5a11b1c40d722c36 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 17:17:50 +0200 Subject: [PATCH 01/11] feat: add support to persist default dimensions --- aws_lambda_powertools/metrics/metrics.py | 47 ++++++++++++++++++----- tests/functional/test_metrics.py | 48 ++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 10 deletions(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 59d3b18e0e4..dcafc8faefc 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -34,18 +34,13 @@ class Metrics(MetricManager): from aws_lambda_powertools import Metrics metrics = Metrics(namespace="ServerlessAirline", service="payment") - metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) - metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) - metrics.add_dimension(name="function_version", value="$LATEST") - ... - @metrics.log_metrics() + @metrics.log_metrics(capture_cold_start_metric=True) def lambda_handler(): - do_something() - return True + metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) + metrics.add_dimension(name="function_version", value="$LATEST") - def do_something(): - metrics.add_metric(name="Something", unit="Count", value=1) + return True Environment variables --------------------- @@ -74,13 +69,15 @@ def do_something(): _metrics: Dict[str, Any] = {} _dimensions: Dict[str, str] = {} _metadata: Dict[str, Any] = {} + _default_dimensions: Dict[str, Any] = {} def __init__(self, service: str = None, namespace: str = None): self.metric_set = self._metrics - self.dimension_set = self._dimensions self.service = service self.namespace: Optional[str] = namespace self.metadata_set = self._metadata + self.default_dimensions = self._default_dimensions + self.dimension_set = {**self._default_dimensions, **self._dimensions} super().__init__( metric_set=self.metric_set, @@ -90,11 
+87,41 @@ def __init__(self, service: str = None, namespace: str = None): service=self.service, ) + def set_default_dimensions(self, **dimensions): + """Persist dimensions across Lambda invocations + + Parameters + ---------- + dimensions : Dict[str, Any], optional + metric dimensions as key=value + + Example + ------- + **Sets some default dimensions that will always be present across metrics and invocations** + + from aws_lambda_powertools import Metrics + + metrics = Metrics(namespace="ServerlessAirline", service="payment") + metrics.set_default_dimensions(environment="demo", another="one") + + @metrics.log_metrics() + def lambda_handler(): + return True + """ + self.default_dimensions.update(**dimensions) + for name, value in dimensions.items(): + self.add_dimension(name, value) + + def clear_default_dimensions(self): + self.default_dimensions.clear() + def clear_metrics(self): logger.debug("Clearing out existing metric set from memory") self.metric_set.clear() self.dimension_set.clear() self.metadata_set.clear() + # re-add default dimensions + self.dimension_set.update(**self._default_dimensions) def log_metrics( self, diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 3090a1228d2..361c8ec6591 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -749,3 +749,51 @@ def test_metric_manage_metadata_set(): assert metric.metadata_set == expected_dict except AttributeError: pytest.fail("AttributeError should not be raised") + + +def test_log_persist_default_dimensions(capsys, metrics, dimensions, namespace): + # GIVEN Metrics is initialized and we persist a set of default dimensions + my_metrics = Metrics(namespace=namespace) + my_metrics.set_default_dimensions(environment="test", log_group="/lambda/test") + + # WHEN we utilize log_metrics to serialize + # and flush metrics and clear all metrics and dimensions from memory + # at the end of a function execution + @my_metrics.log_metrics + def 
lambda_handler(evt, ctx): + for metric in metrics: + my_metrics.add_metric(**metric) + + lambda_handler({}, {}) + first_invocation = capture_metrics_output(capsys) + + lambda_handler({}, {}) + second_invocation = capture_metrics_output(capsys) + + # THEN we should have default dimensions in both outputs + assert "environment" in first_invocation + assert "environment" in second_invocation + + +def test_clear_default_dimensions(namespace): + # GIVEN Metrics is initialized and we persist a set of default dimensions + my_metrics = Metrics(namespace=namespace) + my_metrics.set_default_dimensions(environment="test", log_group="/lambda/test") + + # WHEN they are removed via clear_default_dimensions method + my_metrics.clear_default_dimensions() + + # THEN there should be no default dimensions + assert not my_metrics.default_dimensions + + +def test_default_dimensions_across_instances(namespace): + # GIVEN Metrics is initialized and we persist a set of default dimensions + my_metrics = Metrics(namespace=namespace) + my_metrics.set_default_dimensions(environment="test", log_group="/lambda/test") + + # WHEN a new Metrics instance is created + same_metrics = Metrics() + + # THEN default dimensions should also be present + assert "environment" in same_metrics.default_dimensions From 2aa27bca25267b64c177b8e623c2fe24ca7e8ac9 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 17:29:02 +0200 Subject: [PATCH 02/11] fix: move all mutable ops inside the handler --- docs/core/metrics.md | 58 +++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 984ad760d28..dac10e5f882 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -74,22 +74,28 @@ You can create metrics using `add_metric`, and you can create dimensions for all === "Metrics" - ```python hl_lines="5" + ```python hl_lines="8" from aws_lambda_powertools import Metrics from aws_lambda_powertools.metrics 
import MetricUnit metrics = Metrics(namespace="ExampleApplication", service="booking") - metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + + @metrics.log_metrics + def lambda_handler(evt, ctx): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) ``` === "Metrics with custom dimensions" - ```python hl_lines="5 6" + ```python hl_lines="8-9" from aws_lambda_powertools import Metrics from aws_lambda_powertools.metrics import MetricUnit metrics = Metrics(namespace="ExampleApplication", service="booking") - metrics.add_dimension(name="environment", value="prod") - metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + + @metrics.log_metrics + def lambda_handler(evt, ctx): + metrics.add_dimension(name="environment", value="prod") + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) ``` !!! tip "Autocomplete Metric Units" @@ -106,7 +112,7 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr === "app.py" - ```python hl_lines="7" + ```python hl_lines="6" from aws_lambda_powertools import Metrics from aws_lambda_powertools.metrics import MetricUnit @@ -115,7 +121,6 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr @metrics.log_metrics def lambda_handler(evt, ctx): metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1) - ... 
``` === "Example CloudWatch Logs excerpt" @@ -158,7 +163,7 @@ If you want to ensure that at least one metric is emitted, you can pass `raise_o === "app.py" - ```python hl_lines="3" + ```python hl_lines="5" from aws_lambda_powertools.metrics import Metrics metrics = Metrics() @@ -177,20 +182,17 @@ When using multiple middlewares, use `log_metrics` as your **last decorator** wr === "nested_middlewares.py" - ```python hl_lines="9-10" + ```python hl_lines="7-8" from aws_lambda_powertools import Metrics, Tracer from aws_lambda_powertools.metrics import MetricUnit tracer = Tracer(service="booking") metrics = Metrics(namespace="ExampleApplication", service="booking") - metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) - @metrics.log_metrics @tracer.capture_lambda_handler def lambda_handler(evt, ctx): metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1) - ... ``` ### Capturing cold start metric @@ -199,7 +201,7 @@ You can optionally capture cold start metrics with `log_metrics` decorator via ` === "app.py" - ```python hl_lines="6" + ```python hl_lines="5" from aws_lambda_powertools import Metrics metrics = Metrics(service="ExampleService") @@ -227,13 +229,16 @@ You can add high-cardinality data as part of your Metrics log with `add_metadata === "app.py" - ```python hl_lines="6" + ```python hl_lines="9" from aws_lambda_powertools import Metrics from aws_lambda_powertools.metrics import MetricUnit metrics = Metrics(namespace="ExampleApplication", service="booking") - metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) - metrics.add_metadata(key="booking_id", value="booking_uuid") + + @metrics.log_metrics + def lambda_handler(evt, ctx): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + metrics.add_metadata(key="booking_id", value="booking_uuid") ``` === "Example CloudWatch Logs excerpt" @@ -276,13 +281,15 @@ CloudWatch EMF uses the same dimensions across all your metrics. 
Use `single_met === "single_metric.py" - ```python hl_lines="4" + ```python hl_lines="6-7" from aws_lambda_powertools import single_metric from aws_lambda_powertools.metrics import MetricUnit - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ExampleApplication") as metric: - metric.add_dimension(name="function_context", value="$LATEST") - ... + + def lambda_handler(evt, ctx): + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ExampleApplication") as metric: + metric.add_dimension(name="function_context", value="$LATEST") + ... ``` ### Flushing metrics manually @@ -294,17 +301,18 @@ If you prefer not to use `log_metrics` because you might want to encapsulate add === "manual_metric_serialization.py" - ```python hl_lines="8-10" + ```python hl_lines="9-11" import json from aws_lambda_powertools import Metrics from aws_lambda_powertools.metrics import MetricUnit metrics = Metrics(namespace="ExampleApplication", service="booking") - metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) - your_metrics_object = metrics.serialize_metric_set() - metrics.clear_metrics() - print(json.dumps(your_metrics_object)) + def lambda_handler(evt, ctx): + metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) + your_metrics_object = metrics.serialize_metric_set() + metrics.clear_metrics() + print(json.dumps(your_metrics_object)) ``` ## Testing your code From 7eb9e891f6f0cd5ee4df443f10cc80911aac8979 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 17:49:17 +0200 Subject: [PATCH 03/11] chore: ensure default dimensions are always cleared --- tests/functional/test_metrics.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 361c8ec6591..66ce4142eae 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -15,6 +15,7 @@ def reset_metric_set(): metrics = Metrics() 
metrics.clear_metrics() + metrics.clear_default_dimensions() metrics_global.is_cold_start = True # ensure each test has cold start yield From 5755f261ad6411a16f382c332c8dfe4f1d0e2b1c Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 17:50:01 +0200 Subject: [PATCH 04/11] docs: add new default dimensions feature, improve testing section --- docs/core/metrics.md | 58 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index dac10e5f882..d678ab2085f 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -104,6 +104,28 @@ You can create metrics using `add_metric`, and you can create dimensions for all !!! note "Metrics overflow" CloudWatch EMF supports a max of 100 metrics per batch. Metrics utility will flush all metrics when adding the 100th metric. Subsequent metrics, e.g. 101th, will be aggregated into a new EMF object, for your convenience. + +### Adding default dimensions + +You can add default metric dimensions to ensure they are persisted across Lambda invocations using `set_default_dimenions`. + +!!! info "If you'd like to remove them at some point, you can use `clear_default_dimensions` method" + Note that they continue to count against the maximum of 9 dimensions. + +=== "Default dimensions" + + ```python hl_lines="5" + from aws_lambda_powertools import Metrics + from aws_lambda_powertools.metrics import MetricUnit + + metrics = Metrics(namespace="ExampleApplication", service="booking") + metrics.set_default_dimensions(environment="prod", another="one") + + @metrics.log_metrics + def lambda_handler(evt, ctx): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + ``` + ### Flushing metrics As you finish adding all your metrics, you need to serialize and flush them to standard output. You can do that automatically with the `log_metrics` decorator. 
@@ -353,5 +375,41 @@ If you prefer setting environment variable for specific tests, and are using Pyt metrics = Metrics() metrics.clear_metrics() metrics_global.is_cold_start = True # ensure each test has cold start + metrics.clear_default_dimensions() # remove persisted default dimensions, if any yield ``` + +### Inspecting metrics + +As metrics are logged to standard output, you can read stdoutput and assert whether metrics are present. Here's an example using `pytest` with `capsys` built-in fixture: + +=== "pytest_metrics_assertion.py" + + ```python hl_lines="6 9-10 23-34" + from aws_lambda_powertools import Metrics + from aws_lambda_powertools.metrics import MetricUnit + + import json + + def test_log_metrics(capsys): + # GIVEN Metrics is initialized + metrics = Metrics(namespace="ServerlessAirline") + + # WHEN we utilize log_metrics to serialize + # and flush all metrics at the end of a function execution + @metrics.log_metrics + def lambda_handler(evt, ctx): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + metrics.add_dimension(name="environment", value="prod") + + lambda_handler({}, {}) + log = capsys.readouterr().out.strip() # remove any extra line + metrics_output = json.loads(log) # deserialize JSON str + + # THEN we should have no exceptions + # and a valid EMF object should be flushed correctly + assert "SuccessfulBooking" in log # basic string assertion in JSON str + assert "SuccessfulBooking" in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] + ``` + +!!! 
tip "For more elaborate assertions and comparisons, check out [our functional testing for Metrics utility](https://github.com/awslabs/aws-lambda-powertools-python/blob/develop/tests/functional/test_metrics.py)" From 2736fdd1af8fa3dd3ce7de170a9fc30a9b208665 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 21:23:56 +0200 Subject: [PATCH 05/11] fix: default dimension value type --- aws_lambda_powertools/metrics/metrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index dcafc8faefc..731b179d163 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -108,10 +108,11 @@ def set_default_dimensions(self, **dimensions): def lambda_handler(): return True """ - self.default_dimensions.update(**dimensions) for name, value in dimensions.items(): self.add_dimension(name, value) + self.default_dimensions.update(**dimensions) + def clear_default_dimensions(self): self.default_dimensions.clear() @@ -120,8 +121,7 @@ def clear_metrics(self): self.metric_set.clear() self.dimension_set.clear() self.metadata_set.clear() - # re-add default dimensions - self.dimension_set.update(**self._default_dimensions) + self.set_default_dimensions(**self.default_dimensions) # re-add default dimensions def log_metrics( self, From a612407797846505697f1d1d5361be39fb57655b Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 21:27:05 +0200 Subject: [PATCH 06/11] docs: remove confusing wording on limit --- docs/core/metrics.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index d678ab2085f..e1ce63bc558 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -109,8 +109,7 @@ You can create metrics using `add_metric`, and you can create dimensions for all You can add default metric dimensions to ensure they are persisted across Lambda 
invocations using `set_default_dimenions`. -!!! info "If you'd like to remove them at some point, you can use `clear_default_dimensions` method" - Note that they continue to count against the maximum of 9 dimensions. +If you'd like to remove them at some point, you can use `clear_default_dimensions` method. === "Default dimensions" From 23d13b12e64d2304b54087734dc341de8a451e69 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 22 Apr 2021 21:28:29 +0200 Subject: [PATCH 07/11] chore: remove extra identation --- docs/core/metrics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index e1ce63bc558..6877c29b7af 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -374,7 +374,7 @@ If you prefer setting environment variable for specific tests, and are using Pyt metrics = Metrics() metrics.clear_metrics() metrics_global.is_cold_start = True # ensure each test has cold start - metrics.clear_default_dimensions() # remove persisted default dimensions, if any + metrics.clear_default_dimensions() # remove persisted default dimensions, if any yield ``` From 157a767eea41f264ad0d8d1843edd3e43e335acc Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 23 Apr 2021 11:48:14 +0200 Subject: [PATCH 08/11] feat: address Pablo's feedback with a decorator param --- aws_lambda_powertools/metrics/metrics.py | 12 +++++++++--- docs/core/metrics.md | 17 +++++++++++++++-- tests/functional/test_metrics.py | 24 ++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 731b179d163..8cc4895f03e 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -128,6 +128,7 @@ def log_metrics( lambda_handler: Callable[[Any, Any], Any] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, + default_dimensions: Dict[str, str] = None, ): 
"""Decorator to serialize and publish metrics at the end of a function execution. @@ -150,11 +151,13 @@ def handler(event, context): Parameters ---------- lambda_handler : Callable[[Any, Any], Any], optional - Lambda function handler, by default None + lambda function handler, by default None capture_cold_start_metric : bool, optional - Captures cold start metric, by default False + captures cold start metric, by default False raise_on_empty_metrics : bool, optional - Raise exception if no metrics are emitted, by default False + raise exception if no metrics are emitted, by default False + default_dimensions: Dict[str, str], optional + metric dimensions as key=value that will always be present Raises ------ @@ -170,11 +173,14 @@ def handler(event, context): self.log_metrics, capture_cold_start_metric=capture_cold_start_metric, raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, ) @functools.wraps(lambda_handler) def decorate(event, context): try: + if default_dimensions: + self.set_default_dimensions(**default_dimensions) response = lambda_handler(event, context) if capture_cold_start_metric: self.__add_cold_start_metric(context=context) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 6877c29b7af..b3631a86f83 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -107,11 +107,11 @@ You can create metrics using `add_metric`, and you can create dimensions for all ### Adding default dimensions -You can add default metric dimensions to ensure they are persisted across Lambda invocations using `set_default_dimenions`. +You can use either `set_default_dimensions` method or `default_permissions` parameter in `log_metrics` decorator to persist dimensions across Lambda invocations. If you'd like to remove them at some point, you can use `clear_default_dimensions` method. 
-=== "Default dimensions" +=== "set_default_dimensions method" ```python hl_lines="5" from aws_lambda_powertools import Metrics @@ -124,6 +124,19 @@ If you'd like to remove them at some point, you can use `clear_default_dimension def lambda_handler(evt, ctx): metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) ``` +=== "with log_metrics decorator" + + ```python hl_lines="5 7" + from aws_lambda_powertools import Metrics + from aws_lambda_powertools.metrics import MetricUnit + + metrics = Metrics(namespace="ExampleApplication", service="booking") + DEFAULT_DIMENSIONS = {"environment": "prod", "another": "one"} + + @metrics.log_metrics(default_dimensions=DEFAULT_DIMENSIONS) + def lambda_handler(evt, ctx): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + ``` ### Flushing metrics diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 66ce4142eae..ee725da2699 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -798,3 +798,27 @@ def test_default_dimensions_across_instances(namespace): # THEN default dimensions should also be present assert "environment" in same_metrics.default_dimensions + + +def test_log_metrics_with_default_dimensions(capsys, metrics, dimensions, namespace): + # GIVEN Metrics is initialized + my_metrics = Metrics(namespace=namespace) + default_dimensions = {"environment": "test", "log_group": "/lambda/test"} + + # WHEN we utilize log_metrics with default dimensions to serialize + # and flush metrics and clear all metrics and dimensions from memory + # at the end of a function execution + @my_metrics.log_metrics(default_dimensions=default_dimensions) + def lambda_handler(evt, ctx): + for metric in metrics: + my_metrics.add_metric(**metric) + + lambda_handler({}, {}) + first_invocation = capture_metrics_output(capsys) + + lambda_handler({}, {}) + second_invocation = capture_metrics_output(capsys) + + # THEN we should have default 
dimensions in both outputs + assert "environment" in first_invocation + assert "environment" in second_invocation From 8d72b4735cb1a4183cc022783a8160939d4e7ec4 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 23 Apr 2021 12:07:24 +0200 Subject: [PATCH 09/11] docs: add functional testing section --- docs/core/metrics.md | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index b3631a86f83..daa37226ed7 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -391,11 +391,11 @@ If you prefer setting environment variable for specific tests, and are using Pyt yield ``` -### Inspecting metrics +### Functional testing -As metrics are logged to standard output, you can read stdoutput and assert whether metrics are present. Here's an example using `pytest` with `capsys` built-in fixture: +As metrics are logged to standard output, you can read standard output and assert whether metrics are present. 
Here's an example using `pytest` with `capsys` built-in fixture: -=== "pytest_metrics_assertion.py" +=== "Assert single EMF blob with pytest.py" ```python hl_lines="6 9-10 23-34" from aws_lambda_powertools import Metrics @@ -424,4 +424,41 @@ As metrics are logged to standard output, you can read standard output and asser assert "SuccessfulBooking" in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] ``` +=== "Assert multiple EMF blobs with pytest" + + ```python hl_lines="8-9 11 21-23 25 29-30 32" + from aws_lambda_powertools import Metrics + from aws_lambda_powertools.metrics import MetricUnit + + from collections import namedtuple + + import json + + def capture_metrics_output_multiple_emf_objects(capsys): + return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line] + + def test_log_metrics(capsys): + # GIVEN Metrics is initialized + metrics = Metrics(namespace="ServerlessAirline") + + # WHEN log_metrics is used with capture_cold_start_metric + @metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, ctx): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + metrics.add_dimension(name="environment", value="prod") + + # log_metrics uses function_name property from context to add as a dimension for cold start metric + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + + cold_start_blob, custom_metrics_blob = capture_metrics_output_multiple_emf_objects(capsys) + + # THEN ColdStart metric and function_name dimension should be logged + # in a separate EMF blob than the application metrics + assert cold_start_blob["ColdStart"] == [1.0] + assert cold_start_blob["function_name"] == "example_fn" + + assert "SuccessfulBooking" in custom_metrics_blob # as per previous example + ``` + !!!
tip "For more elaborate assertions and comparisons, check out [our functional testing for Metrics utility](https://github.com/awslabs/aws-lambda-powertools-python/blob/develop/tests/functional/test_metrics.py)" From cf1dac5d3deb198e41299b3da242b48c391211ca Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 23 Apr 2021 12:10:22 +0200 Subject: [PATCH 10/11] chore: add banner to inform about sparse ColdStart metric --- docs/core/metrics.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index daa37226ed7..f8f08758244 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -252,6 +252,8 @@ If it's a cold start invocation, this feature will: This has the advantage of keeping cold start metric separate from your application metrics, where you might have unrelated dimensions. +!!! info "We do not emit 0 as a value for ColdStart metric for cost reasons. [Let us know](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=feature-request%2C+triage&template=feature_request.md&title=) if you'd prefer a flag to override it" + ## Advanced ### Adding metadata From a1da39629ec31dbc901f0fad92ff615d9943be17 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 23 Apr 2021 12:12:29 +0200 Subject: [PATCH 11/11] fix: warning about metrics/dimensions in global scope --- docs/core/metrics.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index f8f08758244..b556dce2a9e 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -104,6 +104,8 @@ You can create metrics using `add_metric`, and you can create dimensions for all !!! note "Metrics overflow" CloudWatch EMF supports a max of 100 metrics per batch. Metrics utility will flush all metrics when adding the 100th metric. Subsequent metrics, e.g. 101th, will be aggregated into a new EMF object, for your convenience. +!!! 
warning "Do not create metrics or dimensions outside the handler" + Metrics or dimensions added in the global scope will only be added during cold start. Disregard if you that's the intended behaviour. ### Adding default dimensions