Skip to content

Commit b9f8cea

Browse files
committed
Use a different Metrics class for each provider
1 parent f00eeab commit b9f8cea

File tree

3 files changed

+234
-0
lines changed

3 files changed

+234
-0
lines changed
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
"""Metrics provider package.

Re-exports the provider template base classes and the Datadog draft
provider so callers can import them from this package directly.
"""
from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase
from aws_lambda_powertools.metrics.provider.datadog_provider_draft import (
    DataDogMetrics,
    DataDogProvider,
)

# Public API of this package.
__all__ = [
    "MetricsBase",
    "MetricsProviderBase",
    "DataDogMetrics",
    "DataDogProvider",
]
Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
import functools
2+
import logging
3+
from abc import ABC, abstractmethod
4+
from typing import Any, Callable, Dict, Optional, Union
5+
6+
logger = logging.getLogger(__name__)
7+
8+
9+
class MetricsProviderBase(ABC):
    """Class for metric provider template.

    Use this template to create your own metric provider.

    Concrete providers implement the three-stage lifecycle declared
    below: accumulate metrics (``add_metric``), serialize the
    accumulated state (``serialize``), and emit it (``flush``).
    """

    # General add metric function. Should return combined metrics Dict
    @abstractmethod
    def add_metric(self, *args, **kwargs):
        pass

    # serialize and return dict for flushing
    @abstractmethod
    def serialize(self, *args, **kwargs):
        pass

    # flush serialized data to output, or send to API directly
    @abstractmethod
    def flush(self, *args, **kwargs):
        pass
class MetricsBase(ABC):
    """Class for metric template.

    Use this template to create your own metric class. Subclasses supply
    storage and output behaviour via ``add_metric`` and ``flush_metrics``;
    this base class contributes the ``log_metrics`` decorator and the
    cold-start bookkeeping used by ``capture_cold_start_metric``.
    """

    # Process-wide cold-start flag. The original draft declared
    # ``global is_cold_start`` without ever defining the module global,
    # which raised NameError on the first cold-start capture. A class
    # attribute gives the same "first invocation only" semantics without
    # relying on undefined external state.
    _is_cold_start: bool = True

    @abstractmethod
    def add_metric(self, *args, **kwargs):
        """Store a single metric. Implemented by concrete subclasses."""

    @abstractmethod
    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
        """Serialize and emit all stored metrics. Implemented by subclasses."""

    def log_metrics(
        self,
        lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None,
        capture_cold_start_metric: bool = False,
        raise_on_empty_metrics: bool = False,
    ):
        """Decorator to serialize and publish metrics at the end of a function execution.

        Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler).

        Example
        -------
        **Lambda function using tracer and metrics decorators**

            from aws_lambda_powertools import Metrics, Tracer

            metrics = Metrics(service="payment")
            tracer = Tracer(service="payment")

            @tracer.capture_lambda_handler
            @metrics.log_metrics
            def handler(event, context):
                ...

        Parameters
        ----------
        lambda_handler : Callable[[Any, Any], Any], optional
            lambda function handler, by default None
        capture_cold_start_metric : bool, optional
            captures cold start metric, by default False
        raise_on_empty_metrics : bool, optional
            raise exception if no metrics are emitted, by default False

        Raises
        ------
        Exception
            Any exception raised by the decorated handler is propagated
            after metrics are flushed.
        """

        # If handler is None we've been called with parameters
        # (e.g. ``@metrics.log_metrics(capture_cold_start_metric=True)``).
        # Return a partial function with args filled so the next call
        # receives the actual handler.
        if lambda_handler is None:
            logging.getLogger(__name__).debug("Decorator called with parameters")
            return functools.partial(
                self.log_metrics,
                capture_cold_start_metric=capture_cold_start_metric,
                raise_on_empty_metrics=raise_on_empty_metrics,
            )

        @functools.wraps(lambda_handler)
        def decorate(event, context):
            try:
                response = lambda_handler(event, context)
                if capture_cold_start_metric:
                    self._add_cold_start_metric(context=context)
            finally:
                # Metrics are flushed even when the handler raises.
                self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics)

            return response

        return decorate

    def _add_cold_start_metric(self, context: Any) -> None:
        """Add cold start metric once per process.

        Parameters
        ----------
        context : Any
            Lambda context
        """
        if not MetricsBase._is_cold_start:
            return

        logging.getLogger(__name__).debug("Adding cold start metric and function_name dimension")
        self.add_metric(name="ColdStart", value=1)

        # Flip the shared flag so subsequent invocations in this process
        # do not emit the cold-start metric again.
        MetricsBase._is_cold_start = False
Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
from __future__ import annotations
2+
3+
import json
4+
import logging
5+
import numbers
6+
import time
7+
import warnings
8+
from typing import Dict, List
9+
10+
from aws_lambda_powertools.metrics.exceptions import MetricValueError
11+
from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase
12+
13+
logger = logging.getLogger(__name__)
14+
15+
# Check if using layer
16+
try:
17+
from datadog import lambda_metric
18+
except ImportError:
19+
lambda_metric = None
20+
21+
22+
class DataDogProvider(MetricsProviderBase):
    """Class for datadog provider.

    Buffers metrics in memory; ``flush`` submits them through the optional
    ``datadog`` package when installed, otherwise prints Datadog-formatted
    JSON lines to stdout.
    """

    def __init__(self, namespace):
        # Buffered metrics: each entry is
        # {"m": name, "v": value, "e": epoch timestamp, "t": tags}.
        self.metrics = []
        # Prefix applied to every metric name during serialize().
        self.namespace = namespace
        super().__init__()

    # adding timestamp, tags. unit, resolution, name will not be used
    def add_metric(self, name, value, timestamp=None, tag: List = None):
        """Buffer a single metric.

        Parameters
        ----------
        name : str
            Metric name; the namespace is prepended on serialize().
        value : numbers.Real
            Metric value; non-numeric values raise MetricValueError.
        timestamp : float, optional
            Epoch timestamp; defaults to the current time when falsy.
        tag : List, optional
            Datadog tags (presumably "key:value" strings) for this metric.

        Raises
        ------
        MetricValueError
            When ``value`` is not a real number.
        """
        if not isinstance(value, numbers.Real):
            raise MetricValueError(f"{value} is not a valid number")
        if not timestamp:
            timestamp = time.time()
        # Bug fix: the draft always stored an empty tag list here,
        # silently discarding the caller-supplied ``tag`` argument.
        self.metrics.append({"m": name, "v": float(value), "e": timestamp, "t": list(tag or [])})

    # serialize for flushing
    def serialize(self) -> Dict:
        # logic here is to add dimension and metadata to each metric's tag with "key:value" format
        extra_tags: List = []  # placeholder until dimensions/metadata are wired in
        output_list = []

        for single_metric in self.metrics:
            output_list.append(
                {
                    "m": f"{self.namespace}.{single_metric['m']}",
                    "v": single_metric["v"],
                    "e": single_metric["e"],
                    "t": single_metric["t"] + extra_tags,
                }
            )

        return {"List": output_list}

    # flush serialized data to output
    def flush(self, metrics):
        # submit through datadog extension
        if lambda_metric:
            # Default to [] so a payload without "List" is a no-op instead
            # of a TypeError from iterating None.
            for metric_item in metrics.get("List", []):
                lambda_metric(
                    metric_name=metric_item["m"],
                    value=metric_item["v"],
                    timestamp=metric_item["e"],
                    tags=metric_item["t"],
                )
        # flush to log with datadog format
        # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77
        else:
            for metric_item in metrics.get("List", []):
                print(json.dumps(metric_item, separators=(",", ":")))

    def clear(self):
        """Drop all buffered metrics."""
        self.metrics = []
75+
76+
77+
class DataDogMetrics(MetricsBase):
    """Class for datadog metrics.

    Thin user-facing facade that delegates buffering and output to a
    DataDogProvider instance.
    """

    def __init__(self, provider):
        # Provider implementing add_metric/serialize/flush/clear.
        self.provider = provider
        super().__init__()

    def add_metric(self, name: str, value: float, timestamp: float | None = None, tags: List | None = None):
        """Buffer a single metric in the underlying provider.

        ``timestamp`` may be omitted; the provider substitutes the current
        time for falsy values. (Original annotation was ``timestamp: time``
        — a module, not a type.)
        """
        self.provider.add_metric(name, value, timestamp, tags)

    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
        """Serialize buffered metrics and hand them to the provider for output.

        Parameters
        ----------
        raise_on_empty_metrics : bool, optional
            when True, warn if there are no metrics to publish, by default False
        """
        metrics = self.provider.serialize()
        # Bug fix: serialize() returns {"List": [...]}, which is always a
        # truthy dict, so the draft's ``not metrics`` check could never
        # fire. Inspect the serialized list itself instead.
        if not metrics.get("List") and raise_on_empty_metrics:
            warnings.warn(
                "No application metrics to publish. The cold-start metric may be published if enabled. "
                "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
                stacklevel=2,
            )
        self.provider.flush(metrics)
        self.provider.clear()

0 commit comments

Comments
 (0)