From d66cf982fe32c4aabe4165a98893f560db9ca0bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Wed, 25 Jan 2023 12:07:25 +0000 Subject: [PATCH 01/11] fix: make sure multiple e2e tests run concurrently --- tests/e2e/conftest.py | 1 + tests/e2e/event_handler/conftest.py | 9 +++++++-- tests/e2e/idempotency/conftest.py | 9 +++++++-- tests/e2e/logger/conftest.py | 10 +++++++++- tests/e2e/metrics/conftest.py | 7 ++++++- tests/e2e/parameters/conftest.py | 7 ++++++- tests/e2e/streaming/conftest.py | 7 ++++++- tests/e2e/tracer/conftest.py | 9 +++++++-- tests/e2e/utils/infrastructure.py | 8 +++++--- 9 files changed, 54 insertions(+), 13 deletions(-) diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index f59eea9a33b..e7875fef161 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -23,6 +23,7 @@ def lambda_layer_build(tmp_path_factory: pytest.TempPathFactory, worker_id: str) layer = LocalLambdaPowertoolsLayer() yield from call_once( + id="lambda_layer", task=layer.build, tmp_path_factory=tmp_path_factory, worker_id=worker_id, diff --git a/tests/e2e/event_handler/conftest.py b/tests/e2e/event_handler/conftest.py index 43941946ac7..acf0d1a6094 100644 --- a/tests/e2e/event_handler/conftest.py +++ b/tests/e2e/event_handler/conftest.py @@ -1,10 +1,11 @@ import pytest from tests.e2e.event_handler.infrastructure import EventHandlerStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") -def infrastructure(): +def infrastructure(tmp_path_factory: pytest.TempPathFactory, worker_id: str): """Setup and teardown logic for E2E test infrastructure Yields @@ -14,6 +15,10 @@ def infrastructure(): """ stack = EventHandlerStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id + ) + ) finally: stack.delete() diff --git a/tests/e2e/idempotency/conftest.py b/tests/e2e/idempotency/conftest.py index 24a7c71c1f2..53d7ee606fe 100644 --- a/tests/e2e/idempotency/conftest.py +++ b/tests/e2e/idempotency/conftest.py @@ -1,10 +1,11 @@ import pytest from tests.e2e.idempotency.infrastructure import IdempotencyDynamoDBStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory, worker_id): +def infrastructure(tmp_path_factory: pytest.TempPathFactory, worker_id: str): """Setup and teardown logic for E2E test infrastructure Yields @@ -14,6 +15,10 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = IdempotencyDynamoDBStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id + ) + ) finally: stack.delete() diff --git a/tests/e2e/logger/conftest.py b/tests/e2e/logger/conftest.py index a31be77031b..c865ed98843 100644 --- a/tests/e2e/logger/conftest.py +++ b/tests/e2e/logger/conftest.py @@ -1,6 +1,7 @@ import pytest from tests.e2e.logger.infrastructure import LoggerStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") @@ -14,6 +15,13 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = LoggerStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, + task=stack.deploy, + tmp_path_factory=tmp_path_factory, + worker_id=worker_id, + ) + ) finally: stack.delete() diff --git a/tests/e2e/metrics/conftest.py 
b/tests/e2e/metrics/conftest.py index 2f72e7950be..f82c464a2c2 100644 --- a/tests/e2e/metrics/conftest.py +++ b/tests/e2e/metrics/conftest.py @@ -1,6 +1,7 @@ import pytest from tests.e2e.metrics.infrastructure import MetricsStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") @@ -14,6 +15,10 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = MetricsStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id + ) + ) finally: stack.delete() diff --git a/tests/e2e/parameters/conftest.py b/tests/e2e/parameters/conftest.py index f4c9d7396dd..3cce6883671 100644 --- a/tests/e2e/parameters/conftest.py +++ b/tests/e2e/parameters/conftest.py @@ -1,6 +1,7 @@ import pytest from tests.e2e.parameters.infrastructure import ParametersStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") @@ -14,6 +15,10 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = ParametersStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id + ) + ) finally: stack.delete() diff --git a/tests/e2e/streaming/conftest.py b/tests/e2e/streaming/conftest.py index c3a44365d39..373d7a34b1d 100644 --- a/tests/e2e/streaming/conftest.py +++ b/tests/e2e/streaming/conftest.py @@ -1,6 +1,7 @@ import pytest from tests.e2e.streaming.infrastructure import StreamingStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") @@ -14,6 +15,10 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = StreamingStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id + ) + ) finally: stack.delete() diff --git a/tests/e2e/tracer/conftest.py b/tests/e2e/tracer/conftest.py index afb34ffee2b..00469c08848 100644 --- a/tests/e2e/tracer/conftest.py +++ b/tests/e2e/tracer/conftest.py @@ -1,10 +1,11 @@ import pytest from tests.e2e.tracer.infrastructure import TracerStack +from tests.e2e.utils.infrastructure import call_once @pytest.fixture(autouse=True, scope="module") -def infrastructure(): +def infrastructure(tmp_path_factory, worker_id): """Setup and teardown logic for E2E test infrastructure @@ -15,6 +16,10 @@ def infrastructure(): """ stack = TracerStack() try: - yield stack.deploy() + return ( + yield from call_once( + job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id + ) + ) finally: stack.delete() diff --git a/tests/e2e/utils/infrastructure.py b/tests/e2e/utils/infrastructure.py index daf1a817c89..23dfe8afd0c 100644 --- a/tests/e2e/utils/infrastructure.py +++ b/tests/e2e/utils/infrastructure.py @@ -1,7 +1,6 @@ import json import logging import os -import platform import subprocess import sys import textwrap @@ -57,7 +56,7 @@ def __init__(self) -> None: self._feature_infra_module_path = self.feature_path / "infrastructure" self._feature_infra_file = self.feature_path / "infrastructure.py" self._handlers_dir = self.feature_path / "handlers" - self._cdk_out_dir: Path = CDK_OUT_PATH / "-".join(platform.python_version_tuple()) / self.feature_name + self._cdk_out_dir: Path = CDK_OUT_PATH / self.feature_name self._stack_outputs_file = f'{self._cdk_out_dir / "stack_outputs.json"}' if not 
self._feature_infra_file.exists(): @@ -287,6 +286,7 @@ def add_cfn_output(self, name: str, value: str, arn: str = ""): def call_once( + job_id: str, task: Callable, tmp_path_factory: pytest.TempPathFactory, worker_id: str, @@ -296,6 +296,8 @@ def call_once( Parameters ---------- + id : str + Random string that uniquely identifies this call task : Callable Function to call once and JSON serialize result whether parallel test is enabled or not. tmp_path_factory : pytest.TempPathFactory @@ -318,7 +320,7 @@ def call_once( else: # tmp dir shared by all workers root_tmp_dir = tmp_path_factory.getbasetemp().parent - cache = root_tmp_dir / f"{PYTHON_RUNTIME_VERSION}_cache.json" + cache = root_tmp_dir / f"{PYTHON_RUNTIME_VERSION}_{job_id}_cache.json" with FileLock(f"{cache}.lock"): # If cache exists, return task outputs back From 02bc30bea4e64c9ccc49513a246ba9eea5ccff3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Wed, 25 Jan 2023 12:12:48 +0000 Subject: [PATCH 02/11] fix: parameter --- tests/e2e/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index e7875fef161..358479583f5 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -23,7 +23,7 @@ def lambda_layer_build(tmp_path_factory: pytest.TempPathFactory, worker_id: str) layer = LocalLambdaPowertoolsLayer() yield from call_once( - id="lambda_layer", + job_id="lambda_layer", task=layer.build, tmp_path_factory=tmp_path_factory, worker_id=worker_id, From 51ea440f4dd972de7a1b105983dce67a8fec716a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Wed, 25 Jan 2023 16:47:26 +0000 Subject: [PATCH 03/11] fix: use xdist --- parallel_run_e2e.py | 2 +- tests/e2e/conftest.py | 1 - tests/e2e/event_handler/conftest.py | 11 +++-------- .../event_handler/test_header_serializer.py | 5 +++++ .../test_paths_ending_with_slash.py | 4 ++++ tests/e2e/idempotency/conftest.py | 11 +++-------- .../idempotency/test_idempotency_dynamodb.py | 3 +++ tests/e2e/logger/conftest.py | 14 +++----------- tests/e2e/logger/test_logger.py | 1 + tests/e2e/metrics/conftest.py | 11 +++-------- tests/e2e/metrics/test_metrics.py | 2 ++ tests/e2e/parameters/conftest.py | 11 +++-------- tests/e2e/parameters/test_appconfig.py | 1 + tests/e2e/parameters/test_ssm.py | 2 +- tests/e2e/streaming/conftest.py | 11 +++-------- tests/e2e/streaming/test_s3_object.py | 18 ++++++++++++++++++ tests/e2e/tracer/conftest.py | 11 +++-------- tests/e2e/tracer/test_tracer.py | 3 +++ tests/e2e/utils/infrastructure.py | 5 +---- 19 files changed, 61 insertions(+), 66 deletions(-) diff --git a/parallel_run_e2e.py b/parallel_run_e2e.py index be3e1ab0cf8..1f96aa7b9fc 100755 --- a/parallel_run_e2e.py +++ b/parallel_run_e2e.py @@ -8,7 +8,7 @@ def main(): features = Path("tests/e2e").rglob("infrastructure.py") workers = len(list(features)) - 1 - command = f"poetry run pytest -n {workers} --dist loadfile -o log_cli=true tests/e2e" + command = f"poetry run pytest -n {workers} --dist loadgroup -o log_cli=true tests/e2e" result = subprocess.run(command.split(), shell=False) sys.exit(result.returncode) diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index 358479583f5..f59eea9a33b 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -23,7 +23,6 @@ def lambda_layer_build(tmp_path_factory: pytest.TempPathFactory, worker_id: str) layer = LocalLambdaPowertoolsLayer() yield from call_once( - job_id="lambda_layer", task=layer.build, tmp_path_factory=tmp_path_factory, 
worker_id=worker_id, diff --git a/tests/e2e/event_handler/conftest.py b/tests/e2e/event_handler/conftest.py index acf0d1a6094..664c870e1de 100644 --- a/tests/e2e/event_handler/conftest.py +++ b/tests/e2e/event_handler/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.event_handler.infrastructure import EventHandlerStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory: pytest.TempPathFactory, worker_id: str): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure Yields @@ -15,10 +14,6 @@ def infrastructure(tmp_path_factory: pytest.TempPathFactory, worker_id: str): """ stack = EventHandlerStack() try: - return ( - yield from call_once( - job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/event_handler/test_header_serializer.py b/tests/e2e/event_handler/test_header_serializer.py index eedb69ccaad..5026bf6aa4a 100644 --- a/tests/e2e/event_handler/test_header_serializer.py +++ b/tests/e2e/event_handler/test_header_serializer.py @@ -36,6 +36,7 @@ def lambda_function_url_endpoint(infrastructure: dict) -> str: return infrastructure.get("LambdaFunctionUrl", "") +@pytest.mark.xdist_group(name="event_handler") def test_alb_headers_serializer(alb_basic_listener_endpoint): # GIVEN url = f"{alb_basic_listener_endpoint}/todos" @@ -74,6 +75,7 @@ def test_alb_headers_serializer(alb_basic_listener_endpoint): assert response.cookies.get(last_cookie.name) == last_cookie.value +@pytest.mark.xdist_group(name="event_handler") def test_alb_multi_value_headers_serializer(alb_multi_value_header_listener_endpoint): # GIVEN url = f"{alb_multi_value_header_listener_endpoint}/todos" @@ -112,6 +114,7 @@ def test_alb_multi_value_headers_serializer(alb_multi_value_header_listener_endp assert response.cookies.get(cookie.name) == cookie.value +@pytest.mark.xdist_group(name="event_handler") def test_api_gateway_rest_headers_serializer(apigw_rest_endpoint): # GIVEN url = f"{apigw_rest_endpoint}todos" @@ -147,6 +150,7 @@ def test_api_gateway_rest_headers_serializer(apigw_rest_endpoint): assert response.cookies.get(cookie.name) == cookie.value +@pytest.mark.xdist_group(name="event_handler") def test_api_gateway_http_headers_serializer(apigw_http_endpoint): # GIVEN url = f"{apigw_http_endpoint}todos" @@ -182,6 +186,7 @@ def test_api_gateway_http_headers_serializer(apigw_http_endpoint): assert response.cookies.get(cookie.name) == cookie.value +@pytest.mark.xdist_group(name="event_handler") def test_lambda_function_url_headers_serializer(lambda_function_url_endpoint): # GIVEN url = f"{lambda_function_url_endpoint}todos" # the function url endpoint already has the trailing / diff --git a/tests/e2e/event_handler/test_paths_ending_with_slash.py b/tests/e2e/event_handler/test_paths_ending_with_slash.py index 4c1461d6fc5..1944768c2ff 100644 --- a/tests/e2e/event_handler/test_paths_ending_with_slash.py +++ b/tests/e2e/event_handler/test_paths_ending_with_slash.py @@ -33,6 +33,7 @@ def lambda_function_url_endpoint(infrastructure: dict) -> str: return infrastructure.get("LambdaFunctionUrl", "") +@pytest.mark.xdist_group(name="event_handler") def test_api_gateway_rest_trailing_slash(apigw_rest_endpoint): # GIVEN API URL ends in a trailing slash url = f"{apigw_rest_endpoint}todos/" @@ -51,6 +52,7 @@ def 
test_api_gateway_rest_trailing_slash(apigw_rest_endpoint): assert response.status_code == 200 +@pytest.mark.xdist_group(name="event_handler") def test_api_gateway_http_trailing_slash(apigw_http_endpoint): # GIVEN the URL for the API ends in a trailing slash API gateway should return a 404 url = f"{apigw_http_endpoint}todos/" @@ -67,6 +69,7 @@ def test_api_gateway_http_trailing_slash(apigw_http_endpoint): ) +@pytest.mark.xdist_group(name="event_handler") def test_lambda_function_url_trailing_slash(lambda_function_url_endpoint): # GIVEN the URL for the API ends in a trailing slash it should behave as if there was not one url = f"{lambda_function_url_endpoint}todos/" # the function url endpoint already has the trailing / @@ -83,6 +86,7 @@ def test_lambda_function_url_trailing_slash(lambda_function_url_endpoint): ) +@pytest.mark.xdist_group(name="event_handler") def test_alb_url_trailing_slash(alb_multi_value_header_listener_endpoint): # GIVEN url has a trailing slash - it should behave as if there was not one url = f"{alb_multi_value_header_listener_endpoint}/todos/" diff --git a/tests/e2e/idempotency/conftest.py b/tests/e2e/idempotency/conftest.py index 53d7ee606fe..61578d904a6 100644 --- a/tests/e2e/idempotency/conftest.py +++ b/tests/e2e/idempotency/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.idempotency.infrastructure import IdempotencyDynamoDBStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory: pytest.TempPathFactory, worker_id: str): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure Yields @@ -15,10 +14,6 @@ def infrastructure(tmp_path_factory: pytest.TempPathFactory, worker_id: str): """ stack = IdempotencyDynamoDBStack() try: - return ( - yield from call_once( - job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/idempotency/test_idempotency_dynamodb.py b/tests/e2e/idempotency/test_idempotency_dynamodb.py index 87b61d285ec..d3452a1a161 100644 --- a/tests/e2e/idempotency/test_idempotency_dynamodb.py +++ b/tests/e2e/idempotency/test_idempotency_dynamodb.py @@ -27,6 +27,7 @@ def idempotency_table_name(infrastructure: dict) -> str: return infrastructure.get("DynamoDBTable", "") +@pytest.mark.xdist_group(name="idempotency") def test_ttl_caching_expiration_idempotency(ttl_cache_expiration_handler_fn_arn: str): # GIVEN payload = json.dumps({"message": "Lambda Powertools - TTL 5s"}) @@ -56,6 +57,7 @@ def test_ttl_caching_expiration_idempotency(ttl_cache_expiration_handler_fn_arn: assert third_execution_response != second_execution_response +@pytest.mark.xdist_group(name="idempotency") def test_ttl_caching_timeout_idempotency(ttl_cache_timeout_handler_fn_arn: str): # GIVEN payload_timeout_execution = json.dumps({"sleep": 5, "message": "Lambda Powertools - TTL 1s"}) @@ -79,6 +81,7 @@ def test_ttl_caching_timeout_idempotency(ttl_cache_timeout_handler_fn_arn: str): assert payload_working_execution == execution_working_response +@pytest.mark.xdist_group(name="idempotency") def test_parallel_execution_idempotency(parallel_execution_handler_fn_arn: str): # GIVEN arguments = json.dumps({"message": "Lambda Powertools - Parallel execution"}) diff --git a/tests/e2e/logger/conftest.py b/tests/e2e/logger/conftest.py index c865ed98843..ad336931a93 100644 --- a/tests/e2e/logger/conftest.py +++ 
b/tests/e2e/logger/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.logger.infrastructure import LoggerStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory, worker_id): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure Yields @@ -15,13 +14,6 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = LoggerStack() try: - return ( - yield from call_once( - job_id=stack.feature_name, - task=stack.deploy, - tmp_path_factory=tmp_path_factory, - worker_id=worker_id, - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/logger/test_logger.py b/tests/e2e/logger/test_logger.py index e5c27dd0a8f..28ee9c0aac0 100644 --- a/tests/e2e/logger/test_logger.py +++ b/tests/e2e/logger/test_logger.py @@ -17,6 +17,7 @@ def basic_handler_fn_arn(infrastructure: dict) -> str: return infrastructure.get("BasicHandlerArn", "") +@pytest.mark.xdist_group(name="logger") def test_basic_lambda_logs_visible(basic_handler_fn, basic_handler_fn_arn): # GIVEN message = "logs should be visible with default settings" diff --git a/tests/e2e/metrics/conftest.py b/tests/e2e/metrics/conftest.py index f82c464a2c2..197aaff847f 100644 --- a/tests/e2e/metrics/conftest.py +++ b/tests/e2e/metrics/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.metrics.infrastructure import MetricsStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory, worker_id): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure Yields @@ -15,10 +14,6 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = MetricsStack() try: - return ( - yield from call_once( - job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/metrics/test_metrics.py b/tests/e2e/metrics/test_metrics.py index 516f93ac1f0..192cbcc25af 100644 --- a/tests/e2e/metrics/test_metrics.py +++ b/tests/e2e/metrics/test_metrics.py @@ -28,6 +28,7 @@ def cold_start_fn_arn(infrastructure: dict) -> str: METRIC_NAMESPACE = "powertools-e2e-metric" +@pytest.mark.xdist_group(name="metrics") def test_basic_lambda_metric_is_visible(basic_handler_fn: str, basic_handler_fn_arn: str): # GIVEN metric_name = data_builder.build_metric_name() @@ -47,6 +48,7 @@ def test_basic_lambda_metric_is_visible(basic_handler_fn: str, basic_handler_fn_ assert metric_values == [3.0] +@pytest.mark.xdist_group(name="metrics") def test_cold_start_metric(cold_start_fn_arn: str, cold_start_fn: str): # GIVEN metric_name = "ColdStart" diff --git a/tests/e2e/parameters/conftest.py b/tests/e2e/parameters/conftest.py index 3cce6883671..99146607384 100644 --- a/tests/e2e/parameters/conftest.py +++ b/tests/e2e/parameters/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.parameters.infrastructure import ParametersStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory, worker_id): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure Yields @@ -15,10 +14,6 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = ParametersStack() try: - return ( - yield from call_once( - 
job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/parameters/test_appconfig.py b/tests/e2e/parameters/test_appconfig.py index 0129adb1515..7cf6f87067f 100644 --- a/tests/e2e/parameters/test_appconfig.py +++ b/tests/e2e/parameters/test_appconfig.py @@ -35,6 +35,7 @@ def parameter_appconfig_freeform_profile(infrastructure: dict) -> str: return infrastructure.get("AppConfigProfile", "") +@pytest.mark.xdist_group(name="parameters") def test_get_parameter_appconfig_freeform( parameter_appconfig_freeform_handler_fn_arn: str, parameter_appconfig_freeform_value: str, diff --git a/tests/e2e/parameters/test_ssm.py b/tests/e2e/parameters/test_ssm.py index 7e9614f8ea0..239813fab51 100644 --- a/tests/e2e/parameters/test_ssm.py +++ b/tests/e2e/parameters/test_ssm.py @@ -17,7 +17,7 @@ def parameters_list(infrastructure: dict) -> List[str]: return json.loads(param_list) -# +@pytest.mark.xdist_group(name="parameters") def test_get_parameters_by_name( ssm_get_parameters_by_name_fn_arn: str, parameters_list: str, diff --git a/tests/e2e/streaming/conftest.py b/tests/e2e/streaming/conftest.py index 373d7a34b1d..94f7f212af0 100644 --- a/tests/e2e/streaming/conftest.py +++ b/tests/e2e/streaming/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.streaming.infrastructure import StreamingStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory, worker_id): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure Yields @@ -15,10 +14,6 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = StreamingStack() try: - return ( - yield from call_once( - job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/streaming/test_s3_object.py b/tests/e2e/streaming/test_s3_object.py index 1e2fe1a0222..fe4fd638b10 100644 --- a/tests/e2e/streaming/test_s3_object.py +++ b/tests/e2e/streaming/test_s3_object.py @@ -21,6 +21,7 @@ def s3_object_handler_fn_arn(infrastructure: dict) -> str: return infrastructure.get("S3ObjectHandler", "") +@pytest.mark.xdist_group(name="streaming") def get_object_version(bucket, key) -> str: s3 = boto3.client("s3") versions = s3.list_object_versions(Bucket=bucket) @@ -43,6 +44,7 @@ def get_lambda_result_payload(s3_object_handler_fn_arn: str, payload: dict) -> d return json.loads(handler_result["Payload"].read()) +@pytest.mark.xdist_group(name="streaming") def test_s3_object_size(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "plain.txt"} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) @@ -50,6 +52,7 @@ def test_s3_object_size(s3_object_handler_fn_arn, regular_bucket_name): assert result.get("body") == "hello world" +@pytest.mark.xdist_group(name="streaming") def test_s3_versioned_object_size(s3_object_handler_fn_arn, versioned_bucket_name): key = "plain.txt" payload = { @@ -62,18 +65,21 @@ def test_s3_versioned_object_size(s3_object_handler_fn_arn, versioned_bucket_nam assert result.get("body") == "hello world" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_non_existent(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "NOTEXISTENT.txt"} result = 
get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("error") == "Not found" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_csv_constructor(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "csv.txt", "is_csv": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == {"name": "hello", "value": "world"} +@pytest.mark.xdist_group(name="streaming") def test_s3_versioned_object_csv_constructor(s3_object_handler_fn_arn, versioned_bucket_name): key = "csv.txt" payload = { @@ -86,24 +92,28 @@ def test_s3_versioned_object_csv_constructor(s3_object_handler_fn_arn, versioned assert result.get("body") == {"name": "hello", "value": "world"} +@pytest.mark.xdist_group(name="streaming") def test_s3_object_csv_transform(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "csv.txt", "transform_csv": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == {"name": "hello", "value": "world"} +@pytest.mark.xdist_group(name="streaming") def test_s3_object_csv_transform_in_place(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "csv.txt", "transform_csv": True, "in_place": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == {"name": "hello", "value": "world"} +@pytest.mark.xdist_group(name="streaming") def test_s3_object_csv_gzip_constructor(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "csv.txt.gz", "is_csv": True, "is_gzip": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == {"name": "hello", "value": "world"} +@pytest.mark.xdist_group(name="streaming") def test_s3_versioned_object_csv_gzip_constructor(s3_object_handler_fn_arn, versioned_bucket_name): key = "csv.txt.gz" payload = { @@ -117,12 +127,14 @@ def test_s3_versioned_object_csv_gzip_constructor(s3_object_handler_fn_arn, vers assert result.get("body") == {"name": "hello", "value": "world"} +@pytest.mark.xdist_group(name="streaming") def test_s3_object_gzip_constructor(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "plain.txt.gz", "is_gzip": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == "hello world" +@pytest.mark.xdist_group(name="streaming") def test_s3_versioned_object_gzip_constructor(s3_object_handler_fn_arn, versioned_bucket_name): key = "plain.txt.gz" payload = { @@ -135,18 +147,21 @@ def test_s3_versioned_object_gzip_constructor(s3_object_handler_fn_arn, versione assert result.get("body") == "hello world" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_gzip_transform(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "plain.txt.gz", "transform_gzip": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == "hello world" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_gzip_transform_in_place(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "plain.txt.gz", "transform_gzip": True, "in_place": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) assert result.get("body") == "hello world" 
+@pytest.mark.xdist_group(name="streaming") def test_s3_object_zip_transform(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "fileset.zip", "transform_zip": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) @@ -154,6 +169,7 @@ def test_s3_object_zip_transform(s3_object_handler_fn_arn, regular_bucket_name): assert result.get("body") == "This is file 2" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_zip_transform_in_place(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "fileset.zip", "transform_zip": True, "in_place": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) @@ -161,6 +177,7 @@ def test_s3_object_zip_transform_in_place(s3_object_handler_fn_arn, regular_buck assert result.get("body") == "This is file 2" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_zip_lzma_transform(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "fileset.zip.lzma", "transform_zip_lzma": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) @@ -168,6 +185,7 @@ def test_s3_object_zip_lzma_transform(s3_object_handler_fn_arn, regular_bucket_n assert result.get("body") == "This is file 2" +@pytest.mark.xdist_group(name="streaming") def test_s3_object_zip_lzma_transform_in_place(s3_object_handler_fn_arn, regular_bucket_name): payload = {"bucket": regular_bucket_name, "key": "fileset.zip.lzma", "transform_zip_lzma": True, "in_place": True} result = get_lambda_result_payload(s3_object_handler_fn_arn, payload) diff --git a/tests/e2e/tracer/conftest.py b/tests/e2e/tracer/conftest.py index 00469c08848..d3728ab91ba 100644 --- a/tests/e2e/tracer/conftest.py +++ b/tests/e2e/tracer/conftest.py @@ -1,11 +1,10 @@ import pytest from tests.e2e.tracer.infrastructure import TracerStack -from tests.e2e.utils.infrastructure import call_once -@pytest.fixture(autouse=True, scope="module") -def infrastructure(tmp_path_factory, worker_id): +@pytest.fixture(autouse=True, scope="package") +def infrastructure(): """Setup and teardown logic for E2E test infrastructure @@ -16,10 +15,6 @@ def infrastructure(tmp_path_factory, worker_id): """ stack = TracerStack() try: - return ( - yield from call_once( - job_id=stack.feature_name, task=stack.deploy, tmp_path_factory=tmp_path_factory, worker_id=worker_id - ) - ) + yield stack.deploy() finally: stack.delete() diff --git a/tests/e2e/tracer/test_tracer.py b/tests/e2e/tracer/test_tracer.py index e2abc5af6bc..5dfe68ee08c 100644 --- a/tests/e2e/tracer/test_tracer.py +++ b/tests/e2e/tracer/test_tracer.py @@ -36,6 +36,7 @@ def async_fn(infrastructure: dict) -> str: return infrastructure.get("AsyncCapture", "") +@pytest.mark.xdist_group(name="tracer") def test_lambda_handler_trace_is_visible(basic_handler_fn_arn: str, basic_handler_fn: str): # GIVEN service = data_builder.build_service_name() @@ -64,6 +65,7 @@ def test_lambda_handler_trace_is_visible(basic_handler_fn_arn: str, basic_handle assert len(trace.get_subsegment(name=method_subsegment)) == 2 +@pytest.mark.xdist_group(name="tracer") def test_lambda_handler_trace_multiple_functions_same_name(same_function_name_arn: str, same_function_name_fn: str): # GIVEN service = data_builder.build_service_name() @@ -90,6 +92,7 @@ def test_lambda_handler_trace_multiple_functions_same_name(same_function_name_ar assert len(trace.get_subsegment(name=method_subsegment_comments)) == 1 
+@pytest.mark.xdist_group(name="tracer") def test_async_trace_is_visible(async_fn_arn: str, async_fn: str): # GIVEN service = data_builder.build_service_name() diff --git a/tests/e2e/utils/infrastructure.py b/tests/e2e/utils/infrastructure.py index 23dfe8afd0c..29e45b83abf 100644 --- a/tests/e2e/utils/infrastructure.py +++ b/tests/e2e/utils/infrastructure.py @@ -286,7 +286,6 @@ def add_cfn_output(self, name: str, value: str, arn: str = ""): def call_once( - job_id: str, task: Callable, tmp_path_factory: pytest.TempPathFactory, worker_id: str, @@ -296,8 +295,6 @@ def call_once( Parameters ---------- - id : str - Random string that uniquely identifies this call task : Callable Function to call once and JSON serialize result whether parallel test is enabled or not. tmp_path_factory : pytest.TempPathFactory @@ -320,7 +317,7 @@ def call_once( else: # tmp dir shared by all workers root_tmp_dir = tmp_path_factory.getbasetemp().parent - cache = root_tmp_dir / f"{PYTHON_RUNTIME_VERSION}_{job_id}_cache.json" + cache = root_tmp_dir / f"{PYTHON_RUNTIME_VERSION}_cache.json" with FileLock(f"{cache}.lock"): # If cache exists, return task outputs back From c70425ef256bc77119c2950c76f275fd5530672c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Fri, 27 Jan 2023 11:30:35 +0100 Subject: [PATCH 04/11] fix: parallel lambdas --- tests/e2e/utils/functions.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/e2e/utils/functions.py b/tests/e2e/utils/functions.py index 7b64c439298..de71a0b866c 100644 --- a/tests/e2e/utils/functions.py +++ b/tests/e2e/utils/functions.py @@ -1,14 +1,24 @@ -from concurrent.futures import ThreadPoolExecutor +import time +from concurrent.futures import Future, ThreadPoolExecutor +from typing import List from tests.e2e.utils import data_fetcher # noqa F401 def execute_lambdas_in_parallel(function_name: str, lambdas_arn: list, arguments: str): + def f(function_name, arn, arguments): + eval(function_name)(arn, arguments) + result_list = [] with ThreadPoolExecutor() as executor: - running_tasks = executor.map(lambda exec: eval(function_name)(*exec), [(arn, arguments) for arn in lambdas_arn]) + running_tasks: List[Future] = [] + for arn in lambdas_arn: + time.sleep(0.5 * len(running_tasks)) + running_tasks.append(executor.submit(f, function_name, arn, arguments)) + executor.shutdown(wait=True) + for running_task in running_tasks: - result_list.append(running_task) + result_list.append(running_task.result()) return result_list From f395f51486e47baad4d586cd0aa76a0f05ef1e98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Fri, 27 Jan 2023 11:40:12 +0100 Subject: [PATCH 05/11] fix: don't kill e2e tests when one version fails --- .github/workflows/run-e2e-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/run-e2e-tests.yml b/.github/workflows/run-e2e-tests.yml index 06196b97f92..7a8fb00ebbe 100644 --- a/.github/workflows/run-e2e-tests.yml +++ b/.github/workflows/run-e2e-tests.yml @@ -26,6 +26,7 @@ jobs: id-token: write # needed to request JWT with GitHub's OIDC Token endpoint. 
docs: https://bit.ly/3MNgQO9 contents: read strategy: + fail-fast: false # needed so if a version fails, the others will still be able to complete and cleanup matrix: version: ["3.7", "3.8", "3.9"] if: ${{ github.actor != 'dependabot[bot]' }} From f9fd90b417dc77ec304d755d9f32a13d37f41b30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Fri, 27 Jan 2023 11:53:54 +0100 Subject: [PATCH 06/11] fix: parameters test --- tests/e2e/parameters/infrastructure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/parameters/infrastructure.py b/tests/e2e/parameters/infrastructure.py index e2cd5101ba7..018fceab2aa 100644 --- a/tests/e2e/parameters/infrastructure.py +++ b/tests/e2e/parameters/infrastructure.py @@ -27,7 +27,7 @@ def create_resources(self): iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ - "ssm:GetParameter", + "ssm:GetParameters", ], resources=[f"arn:aws:ssm:{self.region}:{self.account_id}:parameter/powertools/e2e/parameters/*"], ) From 7c1e6c82a7b677d1e2a20e8df624e2e9638d6bc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Fri, 27 Jan 2023 12:14:11 +0100 Subject: [PATCH 07/11] fix: eval --- tests/e2e/utils/functions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/e2e/utils/functions.py b/tests/e2e/utils/functions.py index de71a0b866c..3b0d533144e 100644 --- a/tests/e2e/utils/functions.py +++ b/tests/e2e/utils/functions.py @@ -6,8 +6,8 @@ def execute_lambdas_in_parallel(function_name: str, lambdas_arn: list, arguments: str): - def f(function_name, arn, arguments): - eval(function_name)(arn, arguments) + def f(fname: str, farn: str, fargs: str): + return eval(fname)(farn, fargs) result_list = [] with ThreadPoolExecutor() as executor: From 4fd4391e430ba87d111cef171a44502dcd9b650d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Fri, 27 Jan 2023 13:48:58 +0100 Subject: [PATCH 08/11] chore: document --- parallel_run_e2e.py | 2 +- tests/e2e/conftest.py | 9 +++++++++ tests/e2e/utils/functions.py | 3 +++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/parallel_run_e2e.py b/parallel_run_e2e.py index 1f96aa7b9fc..1146f66931e 100755 --- a/parallel_run_e2e.py +++ b/parallel_run_e2e.py @@ -8,7 +8,7 @@ def main(): features = Path("tests/e2e").rglob("infrastructure.py") workers = len(list(features)) - 1 - command = f"poetry run pytest -n {workers} --dist loadgroup -o log_cli=true tests/e2e" + command = f"poetry run pytest -n {workers} -o log_cli=true tests/e2e" result = subprocess.run(command.split(), shell=False) sys.exit(result.returncode) diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index f59eea9a33b..49fdffc18f5 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -1,4 +1,5 @@ import pytest +from xdist.scheduler import LoadGroupScheduling from tests.e2e.utils.infrastructure import call_once from tests.e2e.utils.lambda_layer.powertools_layer import LocalLambdaPowertoolsLayer @@ -27,3 +28,11 @@ def lambda_layer_build(tmp_path_factory: pytest.TempPathFactory, worker_id: str) tmp_path_factory=tmp_path_factory, worker_id=worker_id, ) + + +# Hook to use the xdist_group to decide how to schedule tests among the different workers. +# Each test needs to be marked with @pytest.mark.xdist_group(name=...). This way, all +# the tests on each directory will be scheduled on the same worker, thus sharing the +# infrastructure. 
+def pytest_xdist_make_scheduler(config, log): + return LoadGroupScheduling(config, log) diff --git a/tests/e2e/utils/functions.py b/tests/e2e/utils/functions.py index 3b0d533144e..219609f6d88 100644 --- a/tests/e2e/utils/functions.py +++ b/tests/e2e/utils/functions.py @@ -13,6 +13,9 @@ def f(fname: str, farn: str, fargs: str): with ThreadPoolExecutor() as executor: running_tasks: List[Future] = [] for arn in lambdas_arn: + # Sleep 0.5, 1, 1.5, ... seconds between each invocation. This way + # we can guarantee that lambdas are executed in parallel, but they are + # called in the same "order" as they are passed in. time.sleep(0.5 * len(running_tasks)) running_tasks.append(executor.submit(f, function_name, arn, arguments)) From 2bfc558f0646cf4d136d7df77755449d66dc3d9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Fri, 27 Jan 2023 19:32:11 +0100 Subject: [PATCH 09/11] fix: simplified parallel lambdas --- tests/e2e/utils/functions.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/e2e/utils/functions.py b/tests/e2e/utils/functions.py index 219609f6d88..db70f4ab6b9 100644 --- a/tests/e2e/utils/functions.py +++ b/tests/e2e/utils/functions.py @@ -6,18 +6,23 @@ def execute_lambdas_in_parallel(function_name: str, lambdas_arn: list, arguments: str): - def f(fname: str, farn: str, fargs: str): - return eval(fname)(farn, fargs) - result_list = [] with ThreadPoolExecutor() as executor: running_tasks: List[Future] = [] for arn in lambdas_arn: # Sleep 0.5, 1, 1.5, ... seconds between each invocation. This way # we can guarantee that lambdas are executed in parallel, but they are - # called in the same "order" as they are passed in. + # called in the same "order" as they are passed in, thus guaranteeing that + # we can assert on the correct output. time.sleep(0.5 * len(running_tasks)) - running_tasks.append(executor.submit(f, function_name, arn, arguments)) + running_tasks.append( + executor.submit( + lambda lname, larn, largs: eval(lname)(larn, largs), + function_name, + arn, + arguments, + ) + ) executor.shutdown(wait=True) From 474932b51a2e289aaf029c6e7f05db81b0a63ba3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BAben=20Fonseca?= Date: Mon, 30 Jan 2023 09:27:31 +0100 Subject: [PATCH 10/11] fix: move dist=group option to pytest.ini --- pyproject.toml | 2 +- tests/e2e/conftest.py | 9 --------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7854d1810a9..3adaedd872f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -155,7 +155,7 @@ exclude = ''' [tool.pytest.ini_options] minversion = "6.0" -addopts = "-ra -vv" +addopts = "-ra -vv --dist loadgroup" # loadgroup is essential for e2e tests to run correctly testpaths = "./tests" markers = [ "perf: marks perf tests to be deselected (deselect with '-m \"not perf\"')", diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index 49fdffc18f5..f59eea9a33b 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -1,5 +1,4 @@ import pytest -from xdist.scheduler import LoadGroupScheduling from tests.e2e.utils.infrastructure import call_once from tests.e2e.utils.lambda_layer.powertools_layer import LocalLambdaPowertoolsLayer @@ -28,11 +27,3 @@ def lambda_layer_build(tmp_path_factory: pytest.TempPathFactory, worker_id: str) tmp_path_factory=tmp_path_factory, worker_id=worker_id, ) - - -# Hook to use the xdist_group to decide how to schedule tests among the different workers. 
-# Each test needs to be marked with @pytest.mark.xdist_group(name=...). This way, all
-# the tests on each directory will be scheduled on the same worker, thus sharing the
-# infrastructure.
-def pytest_xdist_make_scheduler(config, log):
-    return LoadGroupScheduling(config, log)

From c4e97c784ceadc265adf93189437f1ceafa9c91d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=BAben=20Fonseca?=
Date: Mon, 30 Jan 2023 10:14:26 +0100
Subject: [PATCH 11/11] fix: apply loadgroup only to e2e tests

---
 pyproject.toml       | 2 +-
 tests/e2e/pytest.ini | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 tests/e2e/pytest.ini

diff --git a/pyproject.toml b/pyproject.toml
index 3adaedd872f..7854d1810a9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -155,7 +155,7 @@ exclude = '''
 [tool.pytest.ini_options]
 minversion = "6.0"
-addopts = "-ra -vv --dist loadgroup" # loadgroup is essential for e2e tests to run correctly
+addopts = "-ra -vv"
 testpaths = "./tests"
 markers = [
     "perf: marks perf tests to be deselected (deselect with '-m \"not perf\"')",
diff --git a/tests/e2e/pytest.ini b/tests/e2e/pytest.ini
new file mode 100644
index 00000000000..3fc35fa5847
--- /dev/null
+++ b/tests/e2e/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = -ra -vv --dist loadgroup
\ No newline at end of file
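
For readers unfamiliar with pytest-xdist's loadgroup distribution mode, which this series adopts for the e2e suite, the scheduling behaviour can be reproduced with a small self-contained sketch. Everything below is illustrative only: the feature name, fixture body and stack outputs are hypothetical stand-ins for the real stack classes in the repository.

    # conftest.py of a hypothetical tests/e2e/demo/ feature package
    import pytest

    @pytest.fixture(autouse=True, scope="package")
    def infrastructure():
        # Stand-in for DemoStack().deploy(); runs once per feature package.
        outputs = {"FunctionArn": "arn:aws:lambda:eu-west-1:123456789012:function:demo"}
        try:
            yield outputs
        finally:
            pass  # stand-in for DemoStack().delete()

    # test_demo.py in the same package
    @pytest.mark.xdist_group(name="demo")
    def test_first(infrastructure):
        assert infrastructure["FunctionArn"].startswith("arn:aws:lambda:")

    @pytest.mark.xdist_group(name="demo")
    def test_second(infrastructure):
        assert "FunctionArn" in infrastructure

Running "pytest -n auto --dist loadgroup tests/e2e" (the option patch 11 pins in tests/e2e/pytest.ini) sends every test that shares the xdist_group name "demo" to the same worker, so the package-scoped fixture deploys and deletes its stack exactly once per group, while different groups still run on separate workers in parallel.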
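
The call_once helper reworked in patches 01-03 follows the lock-and-cache recipe that the pytest-xdist documentation suggests for running an expensive, session-wide setup exactly once across workers. A rough standalone sketch of that idea follows; the function name and cache file naming are illustrative, it assumes the filelock package and a JSON-serializable task result, and it omits the generator/teardown plumbing of the real helper.

    import json
    from typing import Callable

    import pytest
    from filelock import FileLock


    def run_task_once(task: Callable, tmp_path_factory: pytest.TempPathFactory, worker_id: str, job_id: str):
        # Without xdist there is only one process, so just run the task.
        if worker_id == "master":
            return task()

        # The parent of a worker's base temp dir is shared by all workers in the session.
        root_tmp_dir = tmp_path_factory.getbasetemp().parent
        cache = root_tmp_dir / f"{job_id}_cache.json"
        with FileLock(f"{cache}.lock"):
            # The first worker to acquire the lock runs the task; later workers read the cached result.
            if cache.is_file():
                return json.loads(cache.read_text())
            result = task()
            cache.write_text(json.dumps(result))
            return result

Keying the cache file by job_id is what patch 01 introduces (so the Lambda layer build and each stack deployment get separate caches); patch 03 later drops the per-stack usage of call_once in favour of the loadgroup scheduling shown above.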