@@ -41,31 +41,17 @@ Setting | Description | Environment variable | Constructor parameter
41
41
???+ example
42
42
**AWS Serverless Application Model (SAM)**
43
43
44
- === "template.yml"
45
-
46
- ```yaml hl_lines="9 10"
47
- Resources:
48
- HelloWorldFunction:
49
- Type: AWS::Serverless::Function
50
- Properties:
51
- Runtime: python3.8
52
- Environment:
53
- Variables:
54
- POWERTOOLS_SERVICE_NAME: payment
55
- POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline
56
- ```
44
+ === "template.yml"
57
45
58
- === "app.py"
59
-
60
- ```python hl_lines="4 6"
61
- from aws_lambda_powertools import Metrics
62
- from aws_lambda_powertools.metrics import MetricUnit
46
+ ```yaml hl_lines="12-13"
47
+ --8<-- "docs/examples/core/metrics/template.yml"
48
+ ```
63
49
64
- metrics = Metrics() # Sets metric namespace and service via env var
65
- # OR
66
- metrics = Metrics(namespace="ServerlessAirline", service="orders") # Sets metric namespace, and service as a metric dimension
67
- ```
50
+ === "app.py"
68
51
52
+ ```python hl_lines="4 6-8"
53
+ --8<-- "docs/examples/core/metrics/example_app.py"
54
+ ```
69
55
70
56
### Creating metrics
71
57
@@ -76,28 +62,13 @@ You can create metrics using `add_metric`, and you can create dimensions for all
76
62
77
63
=== "Metrics"
78
64
79
- ```python hl_lines="8"
80
- from aws_lambda_powertools import Metrics
81
- from aws_lambda_powertools.metrics import MetricUnit
82
-
83
- metrics = Metrics(namespace="ExampleApplication", service="booking")
84
-
85
- @metrics.log_metrics
86
- def lambda_handler(evt, ctx):
87
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
65
+ ```python hl_lines="9"
66
+ --8<-- "docs/examples/core/metrics/metrics_app.py"
88
67
```
89
68
=== "Metrics with custom dimensions"
90
69
91
- ```python hl_lines="8-9"
92
- from aws_lambda_powertools import Metrics
93
- from aws_lambda_powertools.metrics import MetricUnit
94
-
95
- metrics = Metrics(namespace="ExampleApplication", service="booking")
96
-
97
- @metrics.log_metrics
98
- def lambda_handler(evt, ctx):
99
- metrics.add_dimension(name="environment", value="prod")
100
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
70
+ ```python hl_lines="9-10"
71
+ --8<-- "docs/examples/core/metrics/metrics_custom_dimensions_app.py"
101
72
```
102
73
103
74
???+ tip "Tip: Autocomplete Metric Units"
@@ -118,28 +89,12 @@ If you'd like to remove them at some point, you can use `clear_default_dimension
118
89
=== "set_default_dimensions method"
119
90
120
91
```python hl_lines="5"
121
- from aws_lambda_powertools import Metrics
122
- from aws_lambda_powertools.metrics import MetricUnit
123
-
124
- metrics = Metrics(namespace="ExampleApplication", service="booking")
125
- metrics.set_default_dimensions(environment="prod", another="one")
126
-
127
- @metrics.log_metrics
128
- def lambda_handler(evt, ctx):
129
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
92
+ --8<-- "docs/examples/core/metrics/set_default_dimensions.py"
130
93
```
131
94
=== "with log_metrics decorator"
132
95
133
- ```python hl_lines="5 7"
134
- from aws_lambda_powertools import Metrics
135
- from aws_lambda_powertools.metrics import MetricUnit
136
-
137
- metrics = Metrics(namespace="ExampleApplication", service="booking")
138
- DEFAULT_DIMENSIONS = {"environment": "prod", "another": "one"}
139
-
140
- @metrics.log_metrics(default_dimensions=DEFAULT_DIMENSIONS)
141
- def lambda_handler(evt, ctx):
142
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
96
+ ```python hl_lines="5 8"
97
+ --8<-- "docs/examples/core/metrics/log_metrics_default_dimensions.py"
143
98
```
144
99
145
100
### Flushing metrics
@@ -150,15 +105,8 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr
150
105
151
106
=== "app.py"
152
107
153
- ```python hl_lines="6"
154
- from aws_lambda_powertools import Metrics
155
- from aws_lambda_powertools.metrics import MetricUnit
156
-
157
- metrics = Metrics(namespace="ExampleApplication", service="ExampleService")
158
-
159
- @metrics.log_metrics
160
- def lambda_handler(evt, ctx):
161
- metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1)
108
+ ```python hl_lines="7"
109
+ --8<-- "docs/examples/core/metrics/flush_metrics.py"
162
110
```
163
111
=== "Example CloudWatch Logs excerpt"
164
112
@@ -199,14 +147,8 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr
199
147
200
148
If you want to ensure at least one metric is always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator:
201
149
202
- ``` python hl_lines="5" title="Raising SchemaValidationError exception if no metrics are added"
203
- from aws_lambda_powertools.metrics import Metrics
204
-
205
- metrics = Metrics()
206
-
207
- @metrics.log_metrics (raise_on_empty_metrics = True )
208
- def lambda_handler (evt , ctx ):
209
- ...
150
+ ``` python hl_lines="6" title="Raising SchemaValidationError exception if no metrics are added"
151
+ --8<-- "docs/examples/core/metrics/log_metrics_raise_on_empty_metrics.py"
210
152
```
211
153
212
154
???+ tip "Suppressing warning messages on empty metrics"
@@ -216,31 +158,16 @@ def lambda_handler(evt, ctx):
216
158
217
159
When using multiple middlewares, use `log_metrics` as your **last decorator** wrapping all subsequent ones to prevent early Metric validations when code hasn't been run yet.
218
160
219
- ``` python hl_lines="7-8" title="Example with multiple decorators"
220
- from aws_lambda_powertools import Metrics, Tracer
221
- from aws_lambda_powertools.metrics import MetricUnit
222
-
223
- tracer = Tracer(service = " booking" )
224
- metrics = Metrics(namespace = " ExampleApplication" , service = " booking" )
225
-
226
- @metrics.log_metrics
227
- @tracer.capture_lambda_handler
228
- def lambda_handler (evt , ctx ):
229
- metrics.add_metric(name = " BookingConfirmation" , unit = MetricUnit.Count, value = 1 )
161
+ ``` python hl_lines="8-9" title="Example with multiple decorators"
162
+ --8<-- "docs/examples/core/metrics/log_metrics_multiple_decorators.py"
230
163
```
231
164
232
165
### Capturing cold start metric
233
166
234
167
You can optionally capture cold start metrics with `log_metrics` decorator via `capture_cold_start_metric` param.
235
168
236
- ``` python hl_lines="5" title="Generating function cold start metric"
237
- from aws_lambda_powertools import Metrics
238
-
239
- metrics = Metrics(service = " ExampleService" )
240
-
241
- @metrics.log_metrics (capture_cold_start_metric = True )
242
- def lambda_handler (evt , ctx ):
243
- ...
169
+ ``` python hl_lines="6" title="Generating function cold start metric"
170
+ --8<-- "docs/examples/core/metrics/log_metrics_capture_cold_start_metric.py"
244
171
```
245
172
246
173
If it's a cold start invocation, this feature will:
@@ -264,16 +191,8 @@ You can add high-cardinality data as part of your Metrics log with `add_metadata
264
191
265
192
=== "app.py"
266
193
267
- ```python hl_lines="9"
268
- from aws_lambda_powertools import Metrics
269
- from aws_lambda_powertools.metrics import MetricUnit
270
-
271
- metrics = Metrics(namespace="ExampleApplication", service="booking")
272
-
273
- @metrics.log_metrics
274
- def lambda_handler(evt, ctx):
275
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
276
- metrics.add_metadata(key="booking_id", value="booking_uuid")
194
+ ```python hl_lines="10"
195
+ --8<-- "docs/examples/core/metrics/add_metadata.py"
277
196
```
278
197
279
198
=== "Example CloudWatch Logs excerpt"
@@ -315,14 +234,7 @@ CloudWatch EMF uses the same dimensions across all your metrics. Use `single_met
315
234
**unique metric = (metric_name + dimension_name + dimension_value)**
316
235
317
236
``` python hl_lines="6-7" title="Generating an EMF blob with a single metric"
318
- from aws_lambda_powertools import single_metric
319
- from aws_lambda_powertools.metrics import MetricUnit
320
-
321
-
322
- def lambda_handler (evt , ctx ):
323
- with single_metric(name = " ColdStart" , unit = MetricUnit.Count, value = 1 , namespace = " ExampleApplication" ) as metric:
324
- metric.add_dimension(name = " function_context" , value = " $LATEST" )
325
- ...
237
+ --8<-- "docs/examples/core/metrics/single_metric.py"
326
238
```
327
239
328
240
### Flushing metrics manually
@@ -332,18 +244,8 @@ If you prefer not to use `log_metrics` because you might want to encapsulate add
332
244
???+ warning
333
245
Metrics, dimensions and namespace validation still applies
334
246
335
- ``` python hl_lines="9-11" title="Manually flushing and clearing metrics from memory"
336
- import json
337
- from aws_lambda_powertools import Metrics
338
- from aws_lambda_powertools.metrics import MetricUnit
339
-
340
- metrics = Metrics(namespace = " ExampleApplication" , service = " booking" )
341
-
342
- def lambda_handler (evt , ctx ):
343
- metrics.add_metric(name = " ColdStart" , unit = MetricUnit.Count, value = 1 )
344
- your_metrics_object = metrics.serialize_metric_set()
345
- metrics.clear_metrics()
346
- print (json.dumps(your_metrics_object))
247
+ ``` python hl_lines="11-13" title="Manually flushing and clearing metrics from memory"
248
+ --8<-- "docs/examples/core/metrics/flush_metrics_manually.py"
347
249
```
348
250
349
251
## Testing your code
@@ -366,14 +268,7 @@ POWERTOOLS_SERVICE_NAME="Example" POWERTOOLS_METRICS_NAMESPACE="Application" pyt
366
268
`Metrics` keep metrics in memory across multiple instances. If you need to test this behaviour, you can use the following Pytest fixture to ensure metrics are reset incl. cold start:
367
269
368
270
``` python title="Clearing metrics between tests"
369
- @pytest.fixture (scope = " function" , autouse = True )
370
- def reset_metric_set ():
371
- # Clear out every metric data prior to every test
372
- metrics = Metrics()
373
- metrics.clear_metrics()
374
- metrics_global.is_cold_start = True # ensure each test has cold start
375
- metrics.clear_default_dimensions() # remove persisted default dimensions, if any
376
- yield
271
+ --8<-- "docs/examples/core/metrics/clear_metrics_between_tests.py"
377
272
```
378
273
379
274
### Functional testing
@@ -382,68 +277,14 @@ As metrics are logged to standard output, you can read standard output and asser
382
277
383
278
=== "Assert single EMF blob with pytest.py"
384
279
385
- ```python hl_lines="6 9-10 23-34"
386
- from aws_lambda_powertools import Metrics
387
- from aws_lambda_powertools.metrics import MetricUnit
388
-
389
- import json
390
-
391
- def test_log_metrics(capsys):
392
- # GIVEN Metrics is initialized
393
- metrics = Metrics(namespace="ServerlessAirline")
394
-
395
- # WHEN we utilize log_metrics to serialize
396
- # and flush all metrics at the end of a function execution
397
- @metrics.log_metrics
398
- def lambda_handler(evt, ctx):
399
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
400
- metrics.add_dimension(name="environment", value="prod")
401
-
402
- lambda_handler({}, {})
403
- log = capsys.readouterr().out.strip() # remove any extra line
404
- metrics_output = json.loads(log) # deserialize JSON str
405
-
406
- # THEN we should have no exceptions
407
- # and a valid EMF object should be flushed correctly
408
- assert "SuccessfulBooking" in log # basic string assertion in JSON str
409
- assert "SuccessfulBooking" in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"]
280
+ ```python hl_lines="7 15-16 24-25"
281
+ --8<-- "docs/examples/core/metrics/functional_testing.py"
410
282
```
411
283
412
284
=== "Assert multiple EMF blobs with pytest"
413
285
414
- ```python hl_lines="8-9 11 21-23 25 29-30 32"
415
- from aws_lambda_powertools import Metrics
416
- from aws_lambda_powertools.metrics import MetricUnit
417
-
418
- from collections import namedtuple
419
-
420
- import json
421
-
422
- def capture_metrics_output_multiple_emf_objects(capsys):
423
- return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line]
424
-
425
- def test_log_metrics(capsys):
426
- # GIVEN Metrics is initialized
427
- metrics = Metrics(namespace="ServerlessAirline")
428
-
429
- # WHEN log_metrics is used with capture_cold_start_metric
430
- @metrics.log_metrics(capture_cold_start_metric=True)
431
- def lambda_handler(evt, ctx):
432
- metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
433
- metrics.add_dimension(name="environment", value="prod")
434
-
435
- # log_metrics uses function_name property from context to add as a dimension for cold start metric
436
- LambdaContext = namedtuple("LambdaContext", "function_name")
437
- lambda_handler({}, LambdaContext("example_fn")
438
-
439
- cold_start_blob, custom_metrics_blob = capture_metrics_output_multiple_emf_objects(capsys)
440
-
441
- # THEN ColdStart metric and function_name dimension should be logged
442
- # in a separate EMF blob than the application metrics
443
- assert cold_start_blob["ColdStart"] == [1.0]
444
- assert cold_start_blob["function_name"] == "example_fn"
445
-
446
- assert "SuccessfulBooking" in custom_metrics_blob # as per previous example
286
+ ```python hl_lines="8-9 12 22-24 26 30-31 33"
287
+ --8<-- "docs/examples/core/metrics/functional_testing_multiple_blobs.py"
447
288
```
448
289
449
290
???+ tip
0 commit comments