Skip to content

Commit 6a80ce0

Browse files
committed
fix(docs): Extract metrics code examples
Changes: - Extract code examples - Run isort and black - Fix python and yaml examples - Update line highlights - Add make task Related to: - aws-powertools#1064
1 parent b577366 commit 6a80ce0

18 files changed

+240
-192
lines changed

Diff for: Makefile

+8
Original file line numberDiff line numberDiff line change
@@ -90,3 +90,11 @@ changelog:
9090

9191
mypy:
9292
poetry run mypy --pretty aws_lambda_powertools
93+
94+
format-examples:
95+
poetry run isort docs/examples
96+
poetry run black docs/examples/*/*/*.py
97+
98+
lint-examples:
99+
poetry run python3 -m py_compile docs/examples/*/*/*.py
100+
cfn-lint docs/examples/*/*/*.yml

Diff for: docs/core/metrics.md

+33-192
Original file line numberDiff line numberDiff line change
@@ -41,31 +41,17 @@ Setting | Description | Environment variable | Constructor parameter
4141
???+ example
4242
**AWS Serverless Application Model (SAM)**
4343

44-
=== "template.yml"
45-
46-
```yaml hl_lines="9 10"
47-
Resources:
48-
HelloWorldFunction:
49-
Type: AWS::Serverless::Function
50-
Properties:
51-
Runtime: python3.8
52-
Environment:
53-
Variables:
54-
POWERTOOLS_SERVICE_NAME: payment
55-
POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline
56-
```
44+
=== "template.yml"
5745

58-
=== "app.py"
59-
60-
```python hl_lines="4 6"
61-
from aws_lambda_powertools import Metrics
62-
from aws_lambda_powertools.metrics import MetricUnit
46+
```yaml hl_lines="12-13"
47+
--8<-- "docs/examples/core/metrics/template.yml"
48+
```
6349

64-
metrics = Metrics() # Sets metric namespace and service via env var
65-
# OR
66-
metrics = Metrics(namespace="ServerlessAirline", service="orders") # Sets metric namespace, and service as a metric dimension
67-
```
50+
=== "app.py"
6851

52+
```python hl_lines="4 6-8"
53+
--8<-- "docs/examples/core/metrics/example_app.py"
54+
```
6955

7056
### Creating metrics
7157

@@ -76,28 +62,13 @@ You can create metrics using `add_metric`, and you can create dimensions for all
7662

7763
=== "Metrics"
7864

79-
```python hl_lines="8"
80-
from aws_lambda_powertools import Metrics
81-
from aws_lambda_powertools.metrics import MetricUnit
82-
83-
metrics = Metrics(namespace="ExampleApplication", service="booking")
84-
85-
@metrics.log_metrics
86-
def lambda_handler(evt, ctx):
87-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
65+
```python hl_lines="9"
66+
--8<-- "docs/examples/core/metrics/metrics_app.py"
8867
```
8968
=== "Metrics with custom dimensions"
9069

91-
```python hl_lines="8-9"
92-
from aws_lambda_powertools import Metrics
93-
from aws_lambda_powertools.metrics import MetricUnit
94-
95-
metrics = Metrics(namespace="ExampleApplication", service="booking")
96-
97-
@metrics.log_metrics
98-
def lambda_handler(evt, ctx):
99-
metrics.add_dimension(name="environment", value="prod")
100-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
70+
```python hl_lines="9-10"
71+
--8<-- "docs/examples/core/metrics/metrics_custom_dimensions_app.py"
10172
```
10273

10374
???+ tip "Tip: Autocomplete Metric Units"
@@ -118,28 +89,12 @@ If you'd like to remove them at some point, you can use `clear_default_dimension
11889
=== "set_default_dimensions method"
11990

12091
```python hl_lines="5"
121-
from aws_lambda_powertools import Metrics
122-
from aws_lambda_powertools.metrics import MetricUnit
123-
124-
metrics = Metrics(namespace="ExampleApplication", service="booking")
125-
metrics.set_default_dimensions(environment="prod", another="one")
126-
127-
@metrics.log_metrics
128-
def lambda_handler(evt, ctx):
129-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
92+
--8<-- "docs/examples/core/metrics/set_default_dimensions.py"
13093
```
13194
=== "with log_metrics decorator"
13295

133-
```python hl_lines="5 7"
134-
from aws_lambda_powertools import Metrics
135-
from aws_lambda_powertools.metrics import MetricUnit
136-
137-
metrics = Metrics(namespace="ExampleApplication", service="booking")
138-
DEFAULT_DIMENSIONS = {"environment": "prod", "another": "one"}
139-
140-
@metrics.log_metrics(default_dimensions=DEFAULT_DIMENSIONS)
141-
def lambda_handler(evt, ctx):
142-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
96+
```python hl_lines="5 8"
97+
--8<-- "docs/examples/core/metrics/log_metrics_default_dimensions.py"
14398
```
14499

145100
### Flushing metrics
@@ -150,15 +105,8 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr
150105

151106
=== "app.py"
152107

153-
```python hl_lines="6"
154-
from aws_lambda_powertools import Metrics
155-
from aws_lambda_powertools.metrics import MetricUnit
156-
157-
metrics = Metrics(namespace="ExampleApplication", service="ExampleService")
158-
159-
@metrics.log_metrics
160-
def lambda_handler(evt, ctx):
161-
metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1)
108+
```python hl_lines="7"
109+
--8<-- "docs/examples/core/metrics/flush_metrics.py"
162110
```
163111
=== "Example CloudWatch Logs excerpt"
164112

@@ -199,14 +147,8 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr
199147

200148
If you want to ensure at least one metric is always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator:
201149

202-
```python hl_lines="5" title="Raising SchemaValidationError exception if no metrics are added"
203-
from aws_lambda_powertools.metrics import Metrics
204-
205-
metrics = Metrics()
206-
207-
@metrics.log_metrics(raise_on_empty_metrics=True)
208-
def lambda_handler(evt, ctx):
209-
...
150+
```python hl_lines="6" title="Raising SchemaValidationError exception if no metrics are added"
151+
--8<-- "docs/examples/core/metrics/log_metrics_raise_on_empty_metrics.py"
210152
```
211153

212154
???+ tip "Suppressing warning messages on empty metrics"
@@ -216,31 +158,16 @@ def lambda_handler(evt, ctx):
216158

217159
When using multiple middlewares, use `log_metrics` as your **last decorator** wrapping all subsequent ones to prevent early Metric validations when code hasn't been run yet.
218160

219-
```python hl_lines="7-8" title="Example with multiple decorators"
220-
from aws_lambda_powertools import Metrics, Tracer
221-
from aws_lambda_powertools.metrics import MetricUnit
222-
223-
tracer = Tracer(service="booking")
224-
metrics = Metrics(namespace="ExampleApplication", service="booking")
225-
226-
@metrics.log_metrics
227-
@tracer.capture_lambda_handler
228-
def lambda_handler(evt, ctx):
229-
metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1)
161+
```python hl_lines="8-9" title="Example with multiple decorators"
162+
--8<-- "docs/examples/core/metrics/log_metrics_multiple_decorators.py"
230163
```
231164

232165
### Capturing cold start metric
233166

234167
You can optionally capture cold start metrics with `log_metrics` decorator via `capture_cold_start_metric` param.
235168

236-
```python hl_lines="5" title="Generating function cold start metric"
237-
from aws_lambda_powertools import Metrics
238-
239-
metrics = Metrics(service="ExampleService")
240-
241-
@metrics.log_metrics(capture_cold_start_metric=True)
242-
def lambda_handler(evt, ctx):
243-
...
169+
```python hl_lines="6" title="Generating function cold start metric"
170+
--8<-- "docs/examples/core/metrics/log_metrics_capture_cold_start_metric.py"
244171
```
245172

246173
If it's a cold start invocation, this feature will:
@@ -264,16 +191,8 @@ You can add high-cardinality data as part of your Metrics log with `add_metadata
264191

265192
=== "app.py"
266193

267-
```python hl_lines="9"
268-
from aws_lambda_powertools import Metrics
269-
from aws_lambda_powertools.metrics import MetricUnit
270-
271-
metrics = Metrics(namespace="ExampleApplication", service="booking")
272-
273-
@metrics.log_metrics
274-
def lambda_handler(evt, ctx):
275-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
276-
metrics.add_metadata(key="booking_id", value="booking_uuid")
194+
```python hl_lines="10"
195+
--8<-- "docs/examples/core/metrics/add_metadata.py"
277196
```
278197

279198
=== "Example CloudWatch Logs excerpt"
@@ -315,14 +234,7 @@ CloudWatch EMF uses the same dimensions across all your metrics. Use `single_met
315234
**unique metric = (metric_name + dimension_name + dimension_value)**
316235

317236
```python hl_lines="6-7" title="Generating an EMF blob with a single metric"
318-
from aws_lambda_powertools import single_metric
319-
from aws_lambda_powertools.metrics import MetricUnit
320-
321-
322-
def lambda_handler(evt, ctx):
323-
with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ExampleApplication") as metric:
324-
metric.add_dimension(name="function_context", value="$LATEST")
325-
...
237+
--8<-- "docs/examples/core/metrics/single_metric.py"
326238
```
327239

328240
### Flushing metrics manually
@@ -332,18 +244,8 @@ If you prefer not to use `log_metrics` because you might want to encapsulate add
332244
???+ warning
333245
Metrics, dimensions and namespace validation still applies
334246

335-
```python hl_lines="9-11" title="Manually flushing and clearing metrics from memory"
336-
import json
337-
from aws_lambda_powertools import Metrics
338-
from aws_lambda_powertools.metrics import MetricUnit
339-
340-
metrics = Metrics(namespace="ExampleApplication", service="booking")
341-
342-
def lambda_handler(evt, ctx):
343-
metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1)
344-
your_metrics_object = metrics.serialize_metric_set()
345-
metrics.clear_metrics()
346-
print(json.dumps(your_metrics_object))
247+
```python hl_lines="11-13" title="Manually flushing and clearing metrics from memory"
248+
--8<-- "docs/examples/core/metrics/flush_metrics_manually.py"
347249
```
348250

349251
## Testing your code
@@ -366,14 +268,7 @@ POWERTOOLS_SERVICE_NAME="Example" POWERTOOLS_METRICS_NAMESPACE="Application" pyt
366268
`Metrics` keep metrics in memory across multiple instances. If you need to test this behaviour, you can use the following Pytest fixture to ensure metrics are reset incl. cold start:
367269

368270
```python title="Clearing metrics between tests"
369-
@pytest.fixture(scope="function", autouse=True)
370-
def reset_metric_set():
371-
# Clear out every metric data prior to every test
372-
metrics = Metrics()
373-
metrics.clear_metrics()
374-
metrics_global.is_cold_start = True # ensure each test has cold start
375-
metrics.clear_default_dimensions() # remove persisted default dimensions, if any
376-
yield
271+
--8<-- "docs/examples/core/metrics/clear_metrics_between_tests.py"
377272
```
378273

379274
### Functional testing
@@ -382,68 +277,14 @@ As metrics are logged to standard output, you can read standard output and asser
382277

383278
=== "Assert single EMF blob with pytest.py"
384279

385-
```python hl_lines="6 9-10 23-34"
386-
from aws_lambda_powertools import Metrics
387-
from aws_lambda_powertools.metrics import MetricUnit
388-
389-
import json
390-
391-
def test_log_metrics(capsys):
392-
# GIVEN Metrics is initialized
393-
metrics = Metrics(namespace="ServerlessAirline")
394-
395-
# WHEN we utilize log_metrics to serialize
396-
# and flush all metrics at the end of a function execution
397-
@metrics.log_metrics
398-
def lambda_handler(evt, ctx):
399-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
400-
metrics.add_dimension(name="environment", value="prod")
401-
402-
lambda_handler({}, {})
403-
log = capsys.readouterr().out.strip() # remove any extra line
404-
metrics_output = json.loads(log) # deserialize JSON str
405-
406-
# THEN we should have no exceptions
407-
# and a valid EMF object should be flushed correctly
408-
assert "SuccessfulBooking" in log # basic string assertion in JSON str
409-
assert "SuccessfulBooking" in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"]
280+
```python hl_lines="7 15-16 24-25"
281+
--8<-- "docs/examples/core/metrics/functional_testing.py"
410282
```
411283

412284
=== "Assert multiple EMF blobs with pytest"
413285

414-
```python hl_lines="8-9 11 21-23 25 29-30 32"
415-
from aws_lambda_powertools import Metrics
416-
from aws_lambda_powertools.metrics import MetricUnit
417-
418-
from collections import namedtuple
419-
420-
import json
421-
422-
def capture_metrics_output_multiple_emf_objects(capsys):
423-
return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line]
424-
425-
def test_log_metrics(capsys):
426-
# GIVEN Metrics is initialized
427-
metrics = Metrics(namespace="ServerlessAirline")
428-
429-
# WHEN log_metrics is used with capture_cold_start_metric
430-
@metrics.log_metrics(capture_cold_start_metric=True)
431-
def lambda_handler(evt, ctx):
432-
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
433-
metrics.add_dimension(name="environment", value="prod")
434-
435-
# log_metrics uses function_name property from context to add as a dimension for cold start metric
436-
LambdaContext = namedtuple("LambdaContext", "function_name")
437-
lambda_handler({}, LambdaContext("example_fn"))
438-
439-
cold_start_blob, custom_metrics_blob = capture_metrics_output_multiple_emf_objects(capsys)
440-
441-
# THEN ColdStart metric and function_name dimension should be logged
442-
# in a separate EMF blob than the application metrics
443-
assert cold_start_blob["ColdStart"] == [1.0]
444-
assert cold_start_blob["function_name"] == "example_fn"
445-
446-
assert "SuccessfulBooking" in custom_metrics_blob # as per previous example
286+
```python hl_lines="8-9 12 22-24 26 30-31 33"
287+
--8<-- "docs/examples/core/metrics/functional_testing_multiple_blobs.py"
447288
```
448289

449290
???+ tip

Diff for: docs/examples/core/metrics/add_metadata.py

+10
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
from aws_lambda_powertools import Metrics
2+
from aws_lambda_powertools.metrics import MetricUnit
3+
4+
metrics = Metrics(namespace="ExampleApplication", service="booking")
5+
6+
7+
@metrics.log_metrics
8+
def lambda_handler(evt, ctx):
9+
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
10+
metrics.add_metadata(key="booking_id", value="booking_uuid")
Diff for: docs/examples/core/metrics/clear_metrics_between_tests.py

+14
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
import pytest
2+
3+
from aws_lambda_powertools import Metrics
4+
from aws_lambda_powertools.metrics import metrics as metrics_global
5+
6+
7+
@pytest.fixture(scope="function", autouse=True)
8+
def reset_metric_set():
9+
# Clear out every metric data prior to every test
10+
metrics = Metrics()
11+
metrics.clear_metrics()
12+
metrics_global.is_cold_start = True # ensure each test has cold start
13+
metrics.clear_default_dimensions() # remove persisted default dimensions, if any
14+
yield

Diff for: docs/examples/core/metrics/example_app.py

+8
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
from aws_lambda_powertools import Metrics
2+
from aws_lambda_powertools.metrics import MetricUnit
3+
4+
metrics = Metrics() # Sets metric namespace and service via env var
5+
# OR
6+
metrics = Metrics(
7+
namespace="ServerlessAirline", service="orders"
8+
) # Sets metric namespace, and service as a metric dimension

Diff for: docs/examples/core/metrics/flush_metrics.py

+9
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from aws_lambda_powertools import Metrics
2+
from aws_lambda_powertools.metrics import MetricUnit
3+
4+
metrics = Metrics(namespace="ExampleApplication", service="ExampleService")
5+
6+
7+
@metrics.log_metrics
8+
def lambda_handler(evt, ctx):
9+
metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1)

Diff for: docs/examples/core/metrics/flush_metrics_manually.py

+13
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
import json
2+
3+
from aws_lambda_powertools import Metrics
4+
from aws_lambda_powertools.metrics import MetricUnit
5+
6+
metrics = Metrics(namespace="ExampleApplication", service="booking")
7+
8+
9+
def lambda_handler(evt, ctx):
10+
metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1)
11+
your_metrics_object = metrics.serialize_metric_set()
12+
metrics.clear_metrics()
13+
print(json.dumps(your_metrics_object))

0 commit comments

Comments
 (0)