feat(metrics): disable metrics flush via environment variables #6046

Merged · 15 commits · Feb 11, 2025
27 changes: 27 additions & 0 deletions aws_lambda_powertools/metrics/functions.py
@@ -1,5 +1,6 @@
from __future__ import annotations

import os
from datetime import datetime

from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import (
@@ -8,6 +9,7 @@
)
from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit
from aws_lambda_powertools.shared import constants
from aws_lambda_powertools.shared.functions import strtobool


def extract_cloudwatch_metric_resolution_value(metric_resolutions: list, resolution: int | MetricResolution) -> int:
@@ -134,3 +136,28 @@ def convert_timestamp_to_emf_format(timestamp: int | datetime) -> int:
# Returning zero represents the initial date of epoch time,
# which will be skipped by Amazon CloudWatch.
return 0


def is_metrics_disabled() -> bool:
"""
Determine if metrics should be disabled based on environment variables.

Returns:
bool: True if metrics are disabled, False otherwise.

Rules:
- If POWERTOOLS_DEV is True and POWERTOOLS_METRICS_DISABLED is True: Disable metrics
- If POWERTOOLS_METRICS_DISABLED is True: Disable metrics
- If POWERTOOLS_DEV is True and POWERTOOLS_METRICS_DISABLED is not set: Disable metrics
"""

is_dev_mode = strtobool(os.getenv(constants.POWERTOOLS_DEV_ENV, "false"))
is_metrics_disabled = strtobool(os.getenv(constants.METRICS_DISABLED_ENV, "false"))

disable_conditions = [
is_metrics_disabled,
is_metrics_disabled and is_dev_mode,
is_dev_mode and os.getenv(constants.METRICS_DISABLED_ENV) is None,
]

return any(disable_conditions)
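
To make the precedence concrete, here's a minimal sketch of how these rules resolve (illustrative only; it assumes a throwaway process where mutating `os.environ` is safe):

```python
import os

from aws_lambda_powertools.metrics.functions import is_metrics_disabled

# POWERTOOLS_DEV alone disables metrics while the kill switch is unset.
os.environ["POWERTOOLS_DEV"] = "true"
os.environ.pop("POWERTOOLS_METRICS_DISABLED", None)
assert is_metrics_disabled() is True

# An explicit "false" on POWERTOOLS_METRICS_DISABLED overrides dev mode.
os.environ["POWERTOOLS_METRICS_DISABLED"] = "false"
assert is_metrics_disabled() is False

# An explicit "true" disables metrics regardless of dev mode.
os.environ["POWERTOOLS_DEV"] = "false"
os.environ["POWERTOOLS_METRICS_DISABLED"] = "true"
assert is_metrics_disabled() is True
```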
2 changes: 2 additions & 0 deletions aws_lambda_powertools/metrics/metrics.py
@@ -47,6 +47,8 @@ def lambda_handler():
metric namespace
POWERTOOLS_SERVICE_NAME : str
service name used for default dimension
POWERTOOLS_METRICS_DISABLED: bool
Powertools metrics disabled (e.g. `"true", "True", "TRUE"`)

Parameters
----------

aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py

@@ -15,6 +15,7 @@
convert_timestamp_to_emf_format,
extract_cloudwatch_metric_resolution_value,
extract_cloudwatch_metric_unit_value,
is_metrics_disabled,
validate_emf_timestamp,
)
from aws_lambda_powertools.metrics.provider.base import BaseProvider
@@ -77,6 +78,7 @@ def __init__(
self.default_dimensions = default_dimensions or {}
self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV))
self.service = resolve_env_var_choice(choice=service, env=os.getenv(constants.SERVICE_NAME_ENV))

self.metadata_set = metadata_set if metadata_set is not None else {}
self.timestamp: int | None = None

@@ -127,6 +129,7 @@ def add_metric(
MetricResolutionError
When metric resolution is not supported by CloudWatch
"""

if not isinstance(value, numbers.Number):
raise MetricValueError(f"{value} is not a valid number")

@@ -268,6 +271,7 @@ def add_dimension(self, name: str, value: str) -> None:
value : str
Dimension value
"""

logger.debug(f"Adding dimension: {name}:{value}")
if len(self.dimension_set) == MAX_DIMENSIONS:
raise SchemaValidationError(
@@ -374,7 +378,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
"If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
stacklevel=2,
)
else:
elif not is_metrics_disabled():
logger.debug("Flushing existing metrics")
metrics = self.serialize_metric_set()
print(json.dumps(metrics, separators=(",", ":")))
5 changes: 3 additions & 2 deletions aws_lambda_powertools/metrics/provider/datadog/datadog.py
@@ -10,6 +10,7 @@
from typing import TYPE_CHECKING, Any

from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError
from aws_lambda_powertools.metrics.functions import is_metrics_disabled
from aws_lambda_powertools.metrics.provider import BaseProvider
from aws_lambda_powertools.metrics.provider.datadog.warnings import DatadogDataValidationWarning
from aws_lambda_powertools.shared import constants
@@ -99,7 +100,6 @@ def add_metric(
>>> sales='sam'
>>> )
"""

# validating metric name
if not self._validate_datadog_metric_name(name):
docs = "https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics"
@@ -180,6 +180,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
raise_on_empty_metrics : bool, optional
raise exception if no metrics are emitted, by default False
"""

if not raise_on_empty_metrics and len(self.metric_set) == 0:
warnings.warn(
"No application metrics to publish. The cold-start metric may be published if enabled. "
@@ -200,7 +201,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
timestamp=metric_item["e"],
tags=metric_item["t"],
)
else:
elif not is_metrics_disabled():
# dd module not found: flush to log, this format can be recognized via datadog log forwarder
# https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77
for metric_item in metrics:
1 change: 1 addition & 0 deletions aws_lambda_powertools/shared/constants.py
@@ -40,6 +40,7 @@
METRICS_NAMESPACE_ENV: str = "POWERTOOLS_METRICS_NAMESPACE"
DATADOG_FLUSH_TO_LOG: str = "DD_FLUSH_TO_LOG"
SERVICE_NAME_ENV: str = "POWERTOOLS_SERVICE_NAME"
METRICS_DISABLED_ENV: str = "POWERTOOLS_METRICS_DISABLED"
# If the timestamp of log event is more than 2 hours in future, the log event is skipped.
# If the timestamp of log event is more than 14 days in past, the log event is skipped.
# See https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AgentReference.html
16 changes: 10 additions & 6 deletions docs/core/metrics.md
@@ -34,12 +34,16 @@ If you're new to Amazon CloudWatch, there are five terminologies you must be aware of
???+ tip
All examples shared in this documentation are available within the [project repository](https://github.com/aws-powertools/powertools-lambda-python/tree/develop/examples){target="_blank"}.

Metric has two global settings that will be used across all metrics emitted:
Metric has three global settings that will be used across all metrics emitted:

| Setting | Description | Environment variable | Constructor parameter |
| -------------------- | ------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
| **Service** | Optionally, sets **service** metric dimension across all metrics e.g. `payment` | `POWERTOOLS_SERVICE_NAME` | `service` |
| Setting | Description | Environment variable | Constructor parameter |
| ------------------------------- | ------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
| **Service** | Optionally, sets **service** metric dimension across all metrics e.g. `payment` | `POWERTOOLS_SERVICE_NAME` | `service` |
| **Disable Powertools Metrics** | Optionally, disables all Powertools metrics. | `POWERTOOLS_METRICS_DISABLED` | N/A |

???+ info
`POWERTOOLS_METRICS_DISABLED` will not disable default metrics created by AWS services.
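
For example, a minimal sketch of the kill switch in action (illustrative; assumes `POWERTOOLS_METRICS_DISABLED` is set in the function's environment, e.g. via Lambda environment variables):

```python
import os

os.environ["POWERTOOLS_METRICS_DISABLED"] = "true"  # stand-in for a Lambda env var

from aws_lambda_powertools import Metrics

metrics = Metrics(namespace="ServerlessAirline", service="payment")
metrics.add_metric(name="SuccessfulBooking", unit="Count", value=1)
metrics.flush_metrics()  # metrics are aggregated as usual, but no EMF blob is printed
```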

???+ tip
Use your application or main service as the metric namespace to easily group all metrics.
@@ -79,7 +83,7 @@ You can create metrics using `add_metric`, and you can create dimensions for all your aggregate metrics using `add_dimension` method.
CloudWatch EMF supports a max of 100 metrics per batch. Metrics utility will flush all metrics when adding the 100th metric. Subsequent metrics (101st+) will be aggregated into a new EMF object, for your convenience.

???+ warning "Warning: Do not create metrics or dimensions outside the handler"
Metrics or dimensions added in the global scope will only be added during cold start. Disregard if you that's the intended behavior.
Metrics or dimensions added in the global scope will only be added during cold start. Disregard if that's the intended behavior.

### Adding high-resolution metrics

14 changes: 9 additions & 5 deletions docs/core/metrics/datadog.md
@@ -23,7 +23,7 @@ stateDiagram-v2
DatadogExtension --> Datadog: async

state LambdaExtension {
DatadogExtension
DatadogExtension
}

```
@@ -174,10 +174,14 @@ This has the advantage of keeping cold start metric separate from your application metrics.

You can use any of the following environment variables to configure `DatadogMetrics`:

| Setting | Description | Environment variable | Constructor parameter |
| -------------------- | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` |
| Setting | Description | Environment variable | Constructor parameter |
| ------------------------------ | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- |
| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` |
| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` |
| **Disable Powertools Metrics** | Optionally, disables all Powertools metrics. | `POWERTOOLS_METRICS_DISABLED` | N/A |

???+ info
`POWERTOOLS_METRICS_DISABLED` will not disable default metrics created by AWS services.
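
For example, a minimal sketch with `DatadogMetrics` (illustrative; assumes the `datadog` module is not installed, so metrics would otherwise be flushed to the log for the Datadog Forwarder):

```python
import os

os.environ["POWERTOOLS_METRICS_DISABLED"] = "true"

from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics

metrics = DatadogMetrics(namespace="ServerlessAirline")
metrics.add_metric(name="SuccessfulBooking", value=1)
metrics.flush_metrics()  # the flush-to-log path is skipped; nothing is emitted
```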

## Advanced

1 change: 1 addition & 0 deletions docs/index.md
@@ -432,6 +432,7 @@ When `POWERTOOLS_DEV` is set to a truthy value (`1`, `true`), it'll have the following effects:
| __Logger__ | Increase JSON indentation to 4. This will ease local debugging when running functions locally under emulators or direct calls while not affecting unit tests. <br><br> However, Amazon CloudWatch Logs view will degrade as each new line is treated as a new message. |
| __Event Handler__ | Enable full traceback errors in the response, indent request/responses, and CORS in dev mode (`*`). |
| __Tracer__ | Future-proof safety to disable tracing operations in non-Lambda environments. This already happens automatically in the Tracer utility. |
| __Metrics__ | Disables Powertools metrics emission by default. <br><br> However, this can be overridden by explicitly setting `POWERTOOLS_METRICS_DISABLED=false`, which takes precedence over the dev mode setting. |

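A quick sketch of that precedence (illustrative; an explicit `POWERTOOLS_METRICS_DISABLED=false` wins over dev mode):

```python
import os

os.environ["POWERTOOLS_DEV"] = "true"
os.environ["POWERTOOLS_METRICS_DISABLED"] = "false"

from aws_lambda_powertools import Metrics

metrics = Metrics(namespace="ServerlessAirline", service="payment")
metrics.add_metric(name="SuccessfulBooking", unit="Count", value=1)
metrics.flush_metrics()  # EMF output is still printed despite POWERTOOLS_DEV=true
```
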
## Debug mode

160 changes: 160 additions & 0 deletions tests/functional/metrics/datadog/test_metrics_datadog.py
@@ -334,3 +334,163 @@ def test_namespace_env_var(monkeypatch):

# THEN namespace should match the explicitly passed variable and not the env var
assert output[0]["m"] == f"{env_namespace}.item_sold"


def test_metrics_disabled_with_env_var(monkeypatch, capsys):
# GIVEN environment variable is set to disable metrics
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")

# WHEN metrics is initialized and adding metrics
metrics = DatadogMetrics()
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN no metrics should have been recorded
captured = capsys.readouterr()
assert not captured.out


def test_metrics_disabled_persists_after_flush(monkeypatch, capsys):
# GIVEN environment variable is set to disable metrics
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")
metrics = DatadogMetrics()

# WHEN multiple operations are performed with flush in between
metrics.add_metric(name="metric1", value=1)
metrics.flush_metrics()

# THEN first flush should not emit any metrics
captured = capsys.readouterr()
assert not captured.out

# WHEN adding and flushing more metrics
metrics.add_metric(name="metric2", value=2)
metrics.flush_metrics()

# THEN second flush should also not emit any metrics
captured = capsys.readouterr()
assert not captured.out


def test_metrics_disabled_with_namespace(monkeypatch, capsys):
# GIVEN environment variable is set to disable metrics
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")

# WHEN metrics is initialized with namespace and service
metrics = DatadogMetrics(namespace="test_namespace")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN no metrics should have been recorded
captured = capsys.readouterr()
assert not captured.out


def test_metrics_disabled_with_dev_mode_true(monkeypatch, capsys):
# GIVEN dev mode is enabled
monkeypatch.setenv("POWERTOOLS_DEV", "true")

# WHEN metrics is initialized
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN no metrics should have been recorded
captured = capsys.readouterr()
assert not captured.out


def test_metrics_enabled_with_env_var_false(monkeypatch, capsys):
# GIVEN environment variable is set to enable metrics
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")

# WHEN metrics is initialized with namespace and metrics added
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN Datadog metrics should be written to stdout
output = capsys.readouterr().out
metrics_output = json.loads(output)

assert metrics_output


def test_metrics_enabled_with_env_var_not_set(monkeypatch, capsys):
# GIVEN environment variable is not set
monkeypatch.delenv("POWERTOOLS_METRICS_DISABLED", raising=False)

# WHEN metrics is initialized with namespace and metrics added
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN metrics should be written to stdout
output = capsys.readouterr().out
metrics_output = json.loads(output)

assert "test.test_metric" in metrics_output["m"]


def test_metrics_enabled_with_dev_mode_false(monkeypatch, capsys):
# GIVEN dev mode is disabled
monkeypatch.setenv("POWERTOOLS_DEV", "false")

# WHEN metrics is initialized
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN metrics should be written to stdout
output = capsys.readouterr().out
metrics_output = json.loads(output)
assert metrics_output


def test_metrics_disabled_dev_mode_overrides_metrics_disabled(monkeypatch, capsys):
# GIVEN dev mode is enabled but metrics disabled is false
monkeypatch.setenv("POWERTOOLS_DEV", "true")
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")

# WHEN metrics is initialized
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN metrics should be written to stdout since POWERTOOLS_METRICS_DISABLED is false
output = capsys.readouterr().out
assert output # First verify we have output
metrics_output = json.loads(output)
assert metrics_output # Then verify it's valid JSON
assert "test.test_metric" in metrics_output["m"] # Verify the metric is present


def test_metrics_enabled_with_both_false(monkeypatch, capsys):
# GIVEN both dev mode and metrics disabled are false
monkeypatch.setenv("POWERTOOLS_DEV", "false")
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "false")

# WHEN metrics is initialized
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN metrics should be written to stdout
output = capsys.readouterr().out
metrics_output = json.loads(output)
assert metrics_output


def test_metrics_disabled_with_dev_mode_false_and_metrics_disabled_true(monkeypatch, capsys):
# GIVEN dev mode is false but metrics disabled is true
monkeypatch.setenv("POWERTOOLS_DEV", "false")
monkeypatch.setenv("POWERTOOLS_METRICS_DISABLED", "true")

# WHEN metrics is initialized
metrics = DatadogMetrics(namespace="test")
metrics.add_metric(name="test_metric", value=1)
metrics.flush_metrics()

# THEN no metrics should have been recorded
captured = capsys.readouterr()
assert not captured.out