Skip to content

Commit efe5e28

Browse files
authored
chore: cleanup tests (#79)
* chore: remove Logger deprecated code * chore: remove Metrics deprecated code * chore: remove models from deprecated code * chore: move logger formatter to its own file * chore: cleanup metrics tests * chore: cleanup tracer tests * chore: cleanup logger tests * chore: cleanup tracer tests * chore: set test coverage to 90% min Signed-off-by: heitorlessa <[email protected]>
1 parent f571ce6 commit efe5e28

File tree

7 files changed

+352
-380
lines changed

7 files changed

+352
-380
lines changed

Diff for: aws_lambda_powertools/logging/formatter.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ def format(self, record): # noqa: A003
8585
# Cache the traceback text to avoid converting it multiple times
8686
# (it's constant anyway)
8787
# from logging.Formatter:format
88-
if not record.exc_text:
88+
if not record.exc_text: # pragma: no cover
8989
record.exc_text = self.formatException(record.exc_info)
9090

9191
if record.exc_text:

Diff for: aws_lambda_powertools/tracing/tracer.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -269,8 +269,10 @@ def decorate(event, context):
269269
function_name=lambda_handler_name, data=response, subsegment=subsegment
270270
)
271271
except Exception as err:
272-
logger.exception("Exception received from lambda handler")
273-
self._add_full_exception_as_metadata(function_name=self.service, error=err, subsegment=subsegment)
272+
logger.exception(f"Exception received from {lambda_handler_name}")
273+
self._add_full_exception_as_metadata(
274+
function_name=lambda_handler_name, error=err, subsegment=subsegment
275+
)
274276
raise
275277

276278
return response

Diff for: pyproject.toml

+1
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ directory = "test_report"
5858
title = "Lambda Powertools Test Coverage"
5959

6060
[tool.coverage.report]
61+
fail_under = 90
6162
exclude_lines = [
6263
# Have to re-enable the standard pragma
6364
"pragma: no cover",

Diff for: tests/functional/test_logger.py

+105-112
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
from aws_lambda_powertools import Logger, Tracer
99
from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError
10-
from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger
10+
from aws_lambda_powertools.logging.logger import set_package_logger
1111

1212

1313
@pytest.fixture
@@ -39,227 +39,220 @@ def lambda_context():
3939
return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values())
4040

4141

42+
@pytest.fixture
43+
def lambda_event():
44+
return {"greeting": "hello"}
45+
46+
47+
def capture_logging_output(stdout):
48+
return json.loads(stdout.getvalue())
49+
50+
51+
def capture_multiple_logging_statements_output(stdout):
52+
return [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
53+
54+
4255
def test_setup_service_name(root_logger, stdout):
43-
# GIVEN service is explicitly defined
44-
# WHEN logger is setup
45-
# THEN service field should be equals service given
4656
service_name = "payment"
57+
# GIVEN Logger is initialized
58+
# WHEN service is explicitly defined
4759
logger = Logger(service=service_name, stream=stdout)
4860

4961
logger.info("Hello")
50-
log = json.loads(stdout.getvalue())
5162

63+
# THEN service field should equal the service given
64+
log = capture_logging_output(stdout)
5265
assert service_name == log["service"]
5366

5467

5568
def test_setup_no_service_name(stdout):
56-
# GIVEN no service is explicitly defined
57-
# WHEN logger is setup
58-
# THEN service field should be "service_undefined"
69+
# GIVEN Logger is initialized
70+
# WHEN no service is explicitly defined
5971
logger = Logger(stream=stdout)
72+
6073
logger.info("Hello")
61-
log = json.loads(stdout.getvalue())
6274

75+
# THEN service field should be "service_undefined"
76+
log = capture_logging_output(stdout)
6377
assert "service_undefined" == log["service"]
6478

6579

6680
def test_setup_service_env_var(monkeypatch, stdout):
67-
# GIVEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
68-
# WHEN logger is setup
69-
# THEN service field should be equals POWERTOOLS_SERVICE_NAME value
7081
service_name = "payment"
82+
# GIVEN Logger is initialized
83+
# WHEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
7184
monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
72-
7385
logger = Logger(stream=stdout)
86+
7487
logger.info("Hello")
75-
log = json.loads(stdout.getvalue())
7688

89+
# THEN service field should equal the POWERTOOLS_SERVICE_NAME value
90+
log = capture_logging_output(stdout)
7791
assert service_name == log["service"]
7892

7993

80-
def test_setup_sampling_rate(monkeypatch, stdout):
81-
# GIVEN sampling rate is explicitly defined via POWERTOOLS_LOGGER_SAMPLE_RATE env
82-
# WHEN logger is setup
83-
# THEN sampling rate should be equals POWERTOOLS_LOGGER_SAMPLE_RATE value and should sample debug logs
84-
94+
def test_setup_sampling_rate_env_var(monkeypatch, stdout):
95+
# GIVEN Logger is initialized
96+
# WHEN sampling rate is explicitly set to 100% via POWERTOOLS_LOGGER_SAMPLE_RATE env
8597
sampling_rate = "1"
8698
monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", sampling_rate)
87-
monkeypatch.setenv("LOG_LEVEL", "INFO")
88-
89-
logger = Logger(stream=stdout)
99+
logger = Logger(stream=stdout, level="INFO")
90100
logger.debug("I am being sampled")
91-
log = json.loads(stdout.getvalue())
92101

102+
# THEN sampling rate should equal the POWERTOOLS_LOGGER_SAMPLE_RATE value
103+
# log level should be DEBUG
104+
# and debug log statements should be in stdout
105+
log = capture_logging_output(stdout)
93106
assert sampling_rate == log["sampling_rate"]
94107
assert "DEBUG" == log["level"]
95108
assert "I am being sampled" == log["message"]
96109

97110

98111
def test_inject_lambda_context(lambda_context, stdout):
99-
# GIVEN a lambda function is decorated with logger
100-
# WHEN logger is setup
101-
# THEN lambda contextual info should always be in the logs
102-
logger_context_keys = (
103-
"function_name",
104-
"function_memory_size",
105-
"function_arn",
106-
"function_request_id",
107-
)
108-
112+
# GIVEN Logger is initialized
109113
logger = Logger(stream=stdout)
110114

115+
# WHEN a lambda function is decorated with logger
111116
@logger.inject_lambda_context
112117
def handler(event, context):
113118
logger.info("Hello")
114119

115120
handler({}, lambda_context)
116121

117-
log = json.loads(stdout.getvalue())
118-
119-
for key in logger_context_keys:
122+
# THEN lambda contextual info should always be in the logs
123+
log = capture_logging_output(stdout)
124+
expected_logger_context_keys = (
125+
"function_name",
126+
"function_memory_size",
127+
"function_arn",
128+
"function_request_id",
129+
)
130+
for key in expected_logger_context_keys:
120131
assert key in log
121132

122133

123-
def test_inject_lambda_context_log_event_request(lambda_context, stdout):
124-
# GIVEN a lambda function is decorated with logger instructed to log event
125-
# WHEN logger is setup
126-
# THEN logger should log event received from Lambda
127-
lambda_event = {"greeting": "hello"}
128-
134+
def test_inject_lambda_context_log_event_request(lambda_context, stdout, lambda_event):
135+
# GIVEN Logger is initialized
129136
logger = Logger(stream=stdout)
130137

138+
# WHEN a lambda function is decorated with logger instructed to log event
131139
@logger.inject_lambda_context(log_event=True)
132-
# @logger.inject_lambda_context(log_event=True)
133140
def handler(event, context):
134141
logger.info("Hello")
135142

136143
handler(lambda_event, lambda_context)
137144

138-
# Given that our string buffer has many log statements separated by newline \n
139-
# We need to clean it before we can assert on
140-
logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
141-
logged_event, _ = logs
142-
assert "greeting" in logged_event["message"]
145+
# THEN logger should log event received from Lambda
146+
logged_event, _ = capture_multiple_logging_statements_output(stdout)
147+
assert logged_event["message"] == lambda_event
143148

144149

145-
def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout):
146-
# GIVEN a lambda function is decorated with logger instructed to log event
147-
# via POWERTOOLS_LOGGER_LOG_EVENT env
148-
# WHEN logger is setup
149-
# THEN logger should log event received from Lambda
150-
lambda_event = {"greeting": "hello"}
150+
def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout, lambda_event):
151+
# GIVEN Logger is initialized
151152
monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true")
152-
153153
logger = Logger(stream=stdout)
154154

155+
# WHEN a lambda function is decorated with logger instructed to log event
156+
# via POWERTOOLS_LOGGER_LOG_EVENT env
155157
@logger.inject_lambda_context
156158
def handler(event, context):
157159
logger.info("Hello")
158160

159161
handler(lambda_event, lambda_context)
160162

161-
# Given that our string buffer has many log statements separated by newline \n
162-
# We need to clean it before we can assert on
163-
logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
164-
165-
event = {}
166-
for log in logs:
167-
if "greeting" in log["message"]:
168-
event = log["message"]
169-
170-
assert event == lambda_event
171-
163+
# THEN logger should log event received from Lambda
164+
logged_event, _ = capture_multiple_logging_statements_output(stdout)
165+
assert logged_event["message"] == lambda_event
172166

173-
def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout):
174-
# GIVEN a lambda function is decorated with logger
175-
# WHEN logger is setup
176-
# THEN logger should not log event received by lambda handler
177-
lambda_event = {"greeting": "hello"}
178167

168+
def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout, lambda_event):
169+
# GIVEN Logger is initialized
179170
logger = Logger(stream=stdout)
180171

172+
# WHEN a lambda function is decorated with logger
181173
@logger.inject_lambda_context
182174
def handler(event, context):
183175
logger.info("Hello")
184176

185177
handler(lambda_event, lambda_context)
186178

187-
# Given that our string buffer has many log statements separated by newline \n
188-
# We need to clean it before we can assert on
189-
logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
190-
191-
event = {}
192-
for log in logs:
193-
if "greeting" in log["message"]:
194-
event = log["message"]
195-
196-
assert event != lambda_event
179+
# THEN logger should not log event received by lambda handler
180+
log = capture_logging_output(stdout)
181+
assert log["message"] != lambda_event
197182

198183

199184
def test_inject_lambda_cold_start(lambda_context, stdout):
200-
# GIVEN a lambda function is decorated with logger, and called twice
201-
# WHEN logger is setup
202-
# THEN cold_start key should only be true in the first call
203-
185+
# cold_start can be false as it's a global variable in Logger module
186+
# so we reset it to simulate the correct behaviour
187+
# since Lambda will only import our logger lib once per concurrent execution
204188
from aws_lambda_powertools.logging import logger
205189

206-
# # As we run tests in parallel global cold_start value can be false
207-
# # here we reset to simulate the correct behaviour
208-
# # since Lambda will only import our logger lib once per concurrent execution
209190
logger.is_cold_start = True
210191

192+
# GIVEN Logger is initialized
211193
logger = Logger(stream=stdout)
212194

213-
def custom_method():
214-
logger.info("Hello from method")
215-
195+
# WHEN a lambda function is decorated with logger, and called twice
216196
@logger.inject_lambda_context
217197
def handler(event, context):
218-
custom_method()
219198
logger.info("Hello")
220199

221200
handler({}, lambda_context)
222201
handler({}, lambda_context)
223202

224-
# Given that our string buffer has many log statements separated by newline \n
225-
# We need to clean it before we can assert on
226-
logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
227-
first_log, second_log, third_log, fourth_log = logs
228-
229-
# First execution
203+
# THEN cold_start key should only be true in the first call
204+
first_log, second_log = capture_multiple_logging_statements_output(stdout)
230205
assert first_log["cold_start"] is True
231-
assert second_log["cold_start"] is True
232-
233-
# Second execution
234-
assert third_log["cold_start"] is False
235-
assert fourth_log["cold_start"] is False
206+
assert second_log["cold_start"] is False
236207

237208

238-
def test_package_logger(capsys):
209+
def test_package_logger_stream(stdout):
210+
# GIVEN package logger "aws_lambda_powertools" is explicitly set with no params
211+
set_package_logger(stream=stdout)
239212

240-
set_package_logger()
213+
# WHEN Tracer is initialized in disabled mode
241214
Tracer(disabled=True)
242-
output = capsys.readouterr()
243215

244-
assert "Tracing has been disabled" in output.out
216+
# THEN Tracer debug log statement should be logged
217+
output = stdout.getvalue()
218+
logger = logging.getLogger("aws_lambda_powertools")
219+
assert "Tracing has been disabled" in output
220+
assert logger.level == logging.DEBUG
245221

246222

247-
def test_package_logger_format(stdout, capsys):
248-
set_package_logger(stream=stdout, formatter=JsonFormatter(formatter="test"))
223+
def test_package_logger_format(capsys):
224+
# GIVEN package logger "aws_lambda_powertools" is explicitly
225+
# with a custom formatter
226+
formatter = logging.Formatter("message=%(message)s")
227+
set_package_logger(formatter=formatter)
228+
229+
# WHEN Tracer is initialized in disabled mode
249230
Tracer(disabled=True)
250-
output = json.loads(stdout.getvalue().split("\n")[0])
251231

252-
assert "test" in output["formatter"]
232+
# THEN Tracer debug log statement should be logged using `message=` format
233+
output = capsys.readouterr().out
234+
logger = logging.getLogger("aws_lambda_powertools")
235+
assert "message=" in output
236+
assert logger.level == logging.DEBUG
253237

254238

255239
def test_logger_append_duplicated(stdout):
240+
# GIVEN Logger is initialized with request_id field
256241
logger = Logger(stream=stdout, request_id="value")
242+
243+
# WHEN `request_id` is appended to the existing structured log
244+
# using a different value
257245
logger.structure_logs(append=True, request_id="new_value")
258246
logger.info("log")
259-
log = json.loads(stdout.getvalue())
247+
248+
# THEN subsequent log statements should have the latest value
249+
log = capture_logging_output(stdout)
260250
assert "new_value" == log["request_id"]
261251

262252

263253
def test_logger_invalid_sampling_rate():
254+
# GIVEN Logger is initialized
255+
# WHEN sampling_rate is set to a non-numeric value
256+
# THEN we should raise InvalidLoggerSamplingRateError
264257
with pytest.raises(InvalidLoggerSamplingRateError):
265258
Logger(sampling_rate="TEST")

0 commit comments

Comments
 (0)