diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index 4beba743209b6..8080a81519d8f 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -50,7 +50,7 @@ jobs:
       COVERAGE: ${{ !contains(matrix.settings[0], 'pypy') }}
     concurrency:
       # https://github.community/t/concurrecy-not-work-for-push/183068/7
-      group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.settings[0] }}-${{ matrix.settings[1] }}
+      group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.settings[0] }}-${{ matrix.settings[1] }}-${{ matrix.settings[2] }}
       cancel-in-progress: true
 
     services:
diff --git a/pandas/tests/base/test_unique.py b/pandas/tests/base/test_unique.py
index ac21c2f979dd4..e399ae45160fc 100644
--- a/pandas/tests/base/test_unique.py
+++ b/pandas/tests/base/test_unique.py
@@ -105,6 +105,9 @@ def test_nunique_null(null_obj, index_or_series_obj):
 
 
 @pytest.mark.single
+@pytest.mark.xfail(
+    reason="Flaky in the CI. Remove once CI has a single build: GH 44584", strict=False
+)
 def test_unique_bad_unicode(index_or_series):
     # regression test for #34550
     uval = "\ud83d"  # smiley emoji
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index d4c3c93a32af0..6b08ea4da8f56 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -7,6 +7,7 @@
     StringIO,
 )
 import logging
+import os
 
 import numpy as np
 import pytest
@@ -264,6 +265,12 @@ def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file):
         expected = read_csv(tips_file)
         tm.assert_frame_equal(result, expected)
 
+    @pytest.mark.skipif(
+        os.environ.get("PANDAS_CI", "0") == "1",
+        reason="This test can hang in our CI min_versions build "
+        "and leads to '##[error]The runner has "
+        "received a shutdown signal...' in GHA. GH: 45651",
+    )
     def test_read_csv_chunked_download(self, s3_resource, caplog, s3so):
         # 8 MB, S3FS uses 5MB chunks
         import s3fs
diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py
index 172065755d4b7..f1040c0bd30f2 100644
--- a/pandas/tests/io/test_fsspec.py
+++ b/pandas/tests/io/test_fsspec.py
@@ -3,7 +3,6 @@
 import numpy as np
 import pytest
 
-from pandas.compat import PY310
 from pandas.compat._optional import VERSIONS
 
 from pandas import (
@@ -182,7 +181,6 @@ def test_arrowparquet_options(fsspectest):
 
 @td.skip_array_manager_not_yet_implemented  # TODO(ArrayManager) fastparquet
 @td.skip_if_no("fastparquet")
-@pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
 def test_fastparquet_options(fsspectest):
     """Regression test for writing to a not-yet-existent GCS Parquet file."""
     df = DataFrame({"a": [0]})
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index a1a39a1cf8881..2eb8738d88b41 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -13,10 +13,7 @@
 
 from pandas._config import get_option
 
-from pandas.compat import (
-    PY310,
-    is_platform_windows,
-)
+from pandas.compat import is_platform_windows
 from pandas.compat.pyarrow import (
     pa_version_under2p0,
     pa_version_under5p0,
@@ -265,7 +262,6 @@ def test_options_py(df_compat, pa):
         check_round_trip(df_compat)
 
 
-@pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
 def test_options_fp(df_compat, fp):
     # use the set option
 
@@ -343,7 +339,6 @@ def test_get_engine_auto_error_message():
         get_engine("auto")
 
 
-@pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
 def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
     # cross-compat with differing reading/writing engines
 
@@ -409,11 +404,7 @@ def test_error(self, engine):
             msg = "to_parquet only supports IO with DataFrames"
             self.check_error_on_write(obj, engine, ValueError, msg)
 
-    def test_columns_dtypes(self, request, engine):
-        if PY310 and engine == "fastparquet":
-            request.node.add_marker(
-                pytest.mark.xfail(reason="fastparquet failing on 3.10")
-            )
+    def test_columns_dtypes(self, engine):
         df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
 
         # unicode
@@ -440,7 +431,7 @@ def test_columns_dtypes_invalid(self, engine):
             self.check_error_on_write(df, engine, ValueError, msg)
 
     @pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
-    def test_compression(self, engine, compression, request):
+    def test_compression(self, engine, compression):
 
         if compression == "snappy":
             pytest.importorskip("snappy")
@@ -448,19 +439,11 @@ def test_compression(self, engine, compression):
         elif compression == "brotli":
             pytest.importorskip("brotli")
 
-        if PY310 and engine == "fastparquet":
-            request.node.add_marker(
-                pytest.mark.xfail(reason="fastparquet failing on 3.10")
-            )
         df = pd.DataFrame({"A": [1, 2, 3]})
         check_round_trip(df, engine, write_kwargs={"compression": compression})
 
-    def test_read_columns(self, engine, request):
+    def test_read_columns(self, engine):
         # GH18154
-        if PY310 and engine == "fastparquet":
-            request.node.add_marker(
-                pytest.mark.xfail(reason="fastparquet failing on 3.10")
-            )
         df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
 
         expected = pd.DataFrame({"string": list("abc")})
@@ -468,11 +451,7 @@
             df, engine, expected=expected, read_kwargs={"columns": ["string"]}
         )
 
-    def test_write_index(self, engine, request):
-        if PY310 and engine == "fastparquet":
-            request.node.add_marker(
-                pytest.mark.xfail(reason="fastparquet failing on 3.10")
-            )
+    def test_write_index(self, engine):
         check_names = engine != "fastparquet"
 
         df = pd.DataFrame({"A": [1, 2, 3]})
@@ -521,13 +500,9 @@ def test_multiindex_with_columns(self, pa):
             df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
         )
 
-    def test_write_ignoring_index(self, engine, request):
+    def test_write_ignoring_index(self, engine):
         # ENH 20768
         # Ensure index=False omits the index from the written Parquet file.
-        if PY310 and engine == "fastparquet":
-            request.node.add_marker(
-                pytest.mark.xfail(reason="fastparquet failing on 3.10")
-            )
         df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
 
         write_kwargs = {"compression": None, "index": False}
@@ -1011,7 +986,6 @@ def test_read_parquet_manager(self, pa, using_array_manager):
 
 
 class TestParquetFastParquet(Base):
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_basic(self, fp, df_full):
         df = df_full
 
@@ -1029,7 +1003,6 @@ def test_duplicate_columns(self, fp):
         msg = "Cannot create parquet dataset with duplicate column names"
         self.check_error_on_write(df, fp, ValueError, msg)
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_bool_with_none(self, fp):
         df = pd.DataFrame({"a": [True, None, False]})
         expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
@@ -1049,12 +1022,10 @@ def test_unsupported(self, fp):
         msg = "Can't infer object conversion type"
         self.check_error_on_write(df, fp, ValueError, msg)
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_categorical(self, fp):
         df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
         check_round_trip(df, fp)
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_filter_row_groups(self, fp):
         d = {"a": list(range(0, 3))}
         df = pd.DataFrame(d)
@@ -1073,7 +1044,6 @@ def test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so):
             write_kwargs={"compression": None, "storage_options": s3so},
         )
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_partition_cols_supported(self, fp, df_full):
         # GH #23283
         partition_cols = ["bool", "int"]
@@ -1091,7 +1061,6 @@
             actual_partition_cols = fastparquet.ParquetFile(path, False).cats
             assert len(actual_partition_cols) == 2
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_partition_cols_string(self, fp, df_full):
         # GH #27117
         partition_cols = "bool"
@@ -1109,7 +1078,6 @@
             actual_partition_cols = fastparquet.ParquetFile(path, False).cats
             assert len(actual_partition_cols) == 1
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_partition_on_supported(self, fp, df_full):
         # GH #23283
         partition_cols = ["bool", "int"]
@@ -1145,7 +1113,6 @@ def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
                 partition_cols=partition_cols,
             )
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
    def test_empty_dataframe(self, fp):
         # GH #27339
         df = pd.DataFrame()
@@ -1153,7 +1120,6 @@
         expected.index.name = "index"
         check_round_trip(df, fp, expected=expected)
 
-    @pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10")
     def test_timezone_aware_index(self, fp, timezone_aware_date_list):
         idx = 5 * [timezone_aware_date_list]
 
diff --git a/pandas/tests/io/test_user_agent.py b/pandas/tests/io/test_user_agent.py
index 78f2365a09d4c..a5869e919f478 100644
--- a/pandas/tests/io/test_user_agent.py
+++ b/pandas/tests/io/test_user_agent.py
@@ -5,18 +5,25 @@
 import http.server
 from io import BytesIO
 import multiprocessing
+import os
 import socket
 import time
 import urllib.error
 
 import pytest
 
-from pandas.compat import PY310
 import pandas.util._test_decorators as td
 
 import pandas as pd
 import pandas._testing as tm
 
+pytestmark = pytest.mark.skipif(
+    os.environ.get("PANDAS_CI", "0") == "1",
+    reason="This test can hang in our CI min_versions build "
+    "and leads to '##[error]The runner has "
+    "received a shutdown signal...' in GHA. GH 45651",
+)
+
 
 class BaseUserAgentResponder(http.server.BaseHTTPRequestHandler):
     """
@@ -245,7 +252,6 @@ def responder(request):
             # TODO(ArrayManager) fastparquet
             marks=[
                 td.skip_array_manager_not_yet_implemented,
-                pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"),
             ],
         ),
         (PickleUserAgentResponder, pd.read_pickle, None),
@@ -283,7 +289,6 @@ def test_server_and_default_headers(responder, read_method, parquet_engine):
             # TODO(ArrayManager) fastparquet
             marks=[
                 td.skip_array_manager_not_yet_implemented,
-                pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"),
             ],
         ),
         (PickleUserAgentResponder, pd.read_pickle, None),
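
Aside: the patch above drops the PY310 fastparquet xfails and instead gates the two hanging test modules on the PANDAS_CI environment variable (GH 45651). A minimal, self-contained sketch of that gating pattern follows; the file and test names here are illustrative assumptions, not taken from the patch.

    # test_ci_skip_sketch.py -- illustrative sketch only; mirrors the
    # env-gated skip used in test_network.py and test_user_agent.py above.
    import os

    import pytest

    # Skip every test in this module when PANDAS_CI=1 (the variable the
    # pandas GitHub Actions workflows export); the tests still run locally.
    pytestmark = pytest.mark.skipif(
        os.environ.get("PANDAS_CI", "0") == "1",
        reason="can hang in CI; see GH 45651",
    )

    def test_runs_only_outside_ci():
        assert 1 + 1 == 2

The same condition can also be applied per test by placing the skipif decorator directly above a single function, as the patch does for test_read_csv_chunked_download.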