From 5396753cb4099004c921391b415923173ed7fc90 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 16:29:25 +0000 Subject: [PATCH 1/6] [pre-commit.ci] pre-commit autoupdate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/hauntsaninja/black-pre-commit-mirror: 23.9.1 → 23.10.1](https://github.com/hauntsaninja/black-pre-commit-mirror/compare/23.9.1...23.10.1) - [github.com/astral-sh/ruff-pre-commit: v0.0.291 → v0.1.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.291...v0.1.4) - [github.com/jendrikseipp/vulture: v2.9.1 → v2.10](https://github.com/jendrikseipp/vulture/compare/v2.9.1...v2.10) - [github.com/codespell-project/codespell: v2.2.5 → v2.2.6](https://github.com/codespell-project/codespell/compare/v2.2.5...v2.2.6) - [github.com/pre-commit/pre-commit-hooks: v4.4.0 → v4.5.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0) - [github.com/pylint-dev/pylint: v3.0.0b0 → v3.0.1](https://github.com/pylint-dev/pylint/compare/v3.0.0b0...v3.0.1) - [github.com/asottile/pyupgrade: v3.13.0 → v3.15.0](https://github.com/asottile/pyupgrade/compare/v3.13.0...v3.15.0) - [github.com/sphinx-contrib/sphinx-lint: v0.6.8 → v0.8.1](https://github.com/sphinx-contrib/sphinx-lint/compare/v0.6.8...v0.8.1) - [github.com/pre-commit/mirrors-clang-format: ea59a72 → v17.0.4](https://github.com/pre-commit/mirrors-clang-format/compare/ea59a72...v17.0.4) --- .pre-commit-config.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a9a9baac6069a..753aefcc00527 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,11 +20,11 @@ ci: repos: - 
repo: https://github.com/hauntsaninja/black-pre-commit-mirror # black compiled with mypyc - rev: 23.9.1 + rev: 23.10.1 hooks: - id: black - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.291 + rev: v0.1.4 hooks: - id: ruff args: [--exit-non-zero-on-fix] @@ -34,14 +34,14 @@ repos: alias: ruff-selected-autofixes args: [--select, "ANN001,ANN204", --fix-only, --exit-non-zero-on-fix] - repo: https://github.com/jendrikseipp/vulture - rev: 'v2.9.1' + rev: 'v2.10' hooks: - id: vulture entry: python scripts/run_vulture.py pass_filenames: true require_serial: false - repo: https://github.com/codespell-project/codespell - rev: v2.2.5 + rev: v2.2.6 hooks: - id: codespell types_or: [python, rst, markdown, cython, c] @@ -52,7 +52,7 @@ repos: - id: cython-lint - id: double-quote-cython-strings - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-ast - id: check-case-conflict @@ -71,7 +71,7 @@ repos: args: [--remove] - id: trailing-whitespace - repo: https://github.com/pylint-dev/pylint - rev: v3.0.0b0 + rev: v3.0.1 hooks: - id: pylint stages: [manual] @@ -94,7 +94,7 @@ repos: hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v3.13.0 + rev: v3.15.0 hooks: - id: pyupgrade args: [--py39-plus] @@ -111,11 +111,11 @@ repos: types: [text] # overwrite types: [rst] types_or: [python, rst] - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.6.8 + rev: v0.8.1 hooks: - id: sphinx-lint - repo: https://github.com/pre-commit/mirrors-clang-format - rev: ea59a72 + rev: v17.0.4 hooks: - id: clang-format files: ^pandas/_libs/src|^pandas/_libs/include From 5854575fdc78d42c0f2f7a662286fdb846afb604 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 16:35:06 +0000 Subject: [PATCH 2/6] [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci --- pandas/__init__.py | 2 +- pandas/_libs/__init__.py | 4 ++-- pandas/_libs/include/pandas/portable.h | 2 +- pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c | 2 +- pandas/core/arrays/datetimes.py | 2 +- pandas/core/arrays/timedeltas.py | 2 +- pandas/core/groupby/groupby.py | 2 +- pandas/core/indexes/base.py | 2 +- pandas/core/interchange/dataframe.py | 2 +- pandas/core/internals/blocks.py | 2 +- pandas/core/reshape/merge.py | 4 ++-- pandas/core/tools/numeric.py | 2 +- pandas/io/parquet.py | 4 ++-- pandas/io/parsers/base_parser.py | 6 +++--- pandas/tests/io/parser/common/test_common_basic.py | 2 +- pandas/tests/util/test_hashing.py | 4 ++-- pandas/util/_decorators.py | 2 +- 17 files changed, 23 insertions(+), 23 deletions(-) diff --git a/pandas/__init__.py b/pandas/__init__.py index 41e34309232ee..7fab662ed2de4 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -24,7 +24,7 @@ try: # numpy compat from pandas.compat import ( - is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401,E501 + is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401 ) except ImportError as _err: # pragma: no cover _module = _err.name diff --git a/pandas/_libs/__init__.py b/pandas/_libs/__init__.py index b084a25917163..26a872a90e493 100644 --- a/pandas/_libs/__init__.py +++ b/pandas/_libs/__init__.py @@ -13,8 +13,8 @@ # Below imports needs to happen first to ensure pandas top level # module gets monkeypatched with the pandas_datetime_CAPI # see pandas_datetime_exec in pd_datetime.c -import pandas._libs.pandas_parser # noqa: E501 # isort: skip # type: ignore[reportUnusedImport] -import pandas._libs.pandas_datetime # noqa: F401,E501 # isort: skip # type: ignore[reportUnusedImport] +import pandas._libs.pandas_parser # isort: skip # type: ignore[reportUnusedImport] +import pandas._libs.pandas_datetime # noqa: F401 # isort: skip # type: ignore[reportUnusedImport] from pandas._libs.interval 
import Interval from pandas._libs.tslibs import ( NaT, diff --git a/pandas/_libs/include/pandas/portable.h b/pandas/_libs/include/pandas/portable.h index 588f070372a8a..be9080172fe42 100644 --- a/pandas/_libs/include/pandas/portable.h +++ b/pandas/_libs/include/pandas/portable.h @@ -21,5 +21,5 @@ The full license is in the LICENSE file, distributed with this software. #define getdigit_ascii(c, default) \ (isdigit_ascii(c) ? ((int)((c) - '0')) : default) #define isspace_ascii(c) (((c) == ' ') || (((unsigned)(c) - '\t') < 5)) -#define toupper_ascii(c) ((((unsigned)(c) - 'a') < 26) ? ((c)&0x5f) : (c)) +#define toupper_ascii(c) ((((unsigned)(c) - 'a') < 26) ? ((c) & 0x5f) : (c)) #define tolower_ascii(c) ((((unsigned)(c) - 'A') < 26) ? ((c) | 0x20) : (c)) diff --git a/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c index 917af4872ecfe..42dfa113e6279 100644 --- a/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c @@ -72,7 +72,7 @@ or UTF-16 surrogate pairs The extra 2 bytes are for the quotes around the string */ -#define RESERVE_STRING(_len) (2 + ((_len)*6)) +#define RESERVE_STRING(_len) (2 + ((_len) * 6)) static const double g_pow10[] = {1, 10, diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8305da745ab00..7e659eff5cd6e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -559,7 +559,7 @@ def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype" # incompatible with return type "ExtensionDtype" in supertype # "ExtensionArray" - def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override] # noqa: E501 + def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override] """ The dtype for the DatetimeArray. 
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 079ac3562c3d9..b64ab1154ef96 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -92,7 +92,7 @@ def f(self) -> np.ndarray: # error: Incompatible types in assignment ( # expression has type "ndarray[Any, dtype[signedinteger[_32Bit]]]", # variable has type "ndarray[Any, dtype[signedinteger[_64Bit]]] - result = get_timedelta_field(values, alias, reso=self._creso) # type: ignore[assignment] # noqa: E501 + result = get_timedelta_field(values, alias, reso=self._creso) # type: ignore[assignment] if self._hasna: result = self._maybe_mask_results( result, fill_value=None, convert="float64" diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b9b69d4ef0c87..14e3decd12185 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2864,7 +2864,7 @@ def _value_counts( result_series.name = name result_series.index = index.set_names(range(len(columns))) result_frame = result_series.reset_index() - orig_dtype = self.grouper.groupings[0].obj.columns.dtype # type: ignore[union-attr] # noqa: E501 + orig_dtype = self.grouper.groupings[0].obj.columns.dtype # type: ignore[union-attr] cols = Index(columns, dtype=orig_dtype).insert(len(columns), name) result_frame.columns = cols result = result_frame diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ebf4f2d515956..28566b933d993 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5374,7 +5374,7 @@ def _getitem_slice(self, slobj: slice) -> Self: result = type(self)._simple_new(res, name=self._name, refs=self._references) if "_engine" in self._cache: reverse = slobj.step is not None and slobj.step < 0 - result._engine._update_from_sliced(self._engine, reverse=reverse) # type: ignore[union-attr] # noqa: E501 + result._engine._update_from_sliced(self._engine, reverse=reverse) # type: ignore[union-attr] return 
result diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py index 0ddceb6b8139b..36f374152709d 100644 --- a/pandas/core/interchange/dataframe.py +++ b/pandas/core/interchange/dataframe.py @@ -87,7 +87,7 @@ def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg: self._df.iloc[:, indices], self._nan_as_null, self._allow_copy ) - def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override] # noqa: E501 + def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override] if not isinstance(names, abc.Sequence): raise ValueError("`names` is not a sequence") if not isinstance(names, list): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 330effe0f0a9f..83dd9f90a698f 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1786,7 +1786,7 @@ def delete(self, loc) -> list[Block]: else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" - values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa: E501 + values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 8bddde9c05dad..f83a12b268b22 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1390,13 +1390,13 @@ def _maybe_coerce_merge_keys(self) -> None: ): ct = find_common_type([lk.dtype, rk.dtype]) if is_extension_array_dtype(ct): - rk = ct.construct_array_type()._from_sequence(rk) # type: ignore[union-attr] # noqa: E501 + rk = ct.construct_array_type()._from_sequence(rk) # type: ignore[union-attr] else: rk = rk.astype(ct) # type: ignore[arg-type] elif is_extension_array_dtype(rk.dtype): ct = 
find_common_type([lk.dtype, rk.dtype]) if is_extension_array_dtype(ct): - lk = ct.construct_array_type()._from_sequence(lk) # type: ignore[union-attr] # noqa: E501 + lk = ct.construct_array_type()._from_sequence(lk) # type: ignore[union-attr] else: lk = lk.astype(ct) # type: ignore[arg-type] diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 8a6ef41b2a540..c5a2736d4f926 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -229,7 +229,7 @@ def to_numeric( values = ensure_object(values) coerce_numeric = errors not in ("ignore", "raise") try: - values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload] # noqa: E501 + values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload] values, set(), coerce_numeric=coerce_numeric, diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index ed254191d2736..0785f14c6839d 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -166,7 +166,7 @@ def __init__(self) -> None: import pyarrow.parquet # import utils to register the pyarrow extension types - import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401,E501 + import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401 self.api = pyarrow @@ -255,7 +255,7 @@ def read( mapping = _arrow_dtype_mapping() to_pandas_kwargs["types_mapper"] = mapping.get elif dtype_backend == "pyarrow": - to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment] # noqa: E501 + to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment] elif using_pyarrow_string_dtype(): to_pandas_kwargs["types_mapper"] = arrow_string_types_mapper() diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 86ec62d2b19b6..4fb97779c690e 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -711,7 +711,7 @@ def _infer_types( values, na_values, False, - 
convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa: E501 + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] ) except (ValueError, TypeError): # e.g. encountering datetime string gets ValueError @@ -747,7 +747,7 @@ def _infer_types( np.asarray(values), true_values=self.true_values, false_values=self.false_values, - convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa: E501 + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] ) if result.dtype == np.bool_ and non_default_dtype_backend: if bool_mask is None: @@ -810,7 +810,7 @@ def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLi if isinstance(cast_type, BooleanDtype): # error: Unexpected keyword argument "true_values" for # "_from_sequence_of_strings" of "ExtensionArray" - return array_type._from_sequence_of_strings( # type: ignore[call-arg] # noqa: E501 + return array_type._from_sequence_of_strings( # type: ignore[call-arg] values, dtype=cast_type, true_values=self.true_values, diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 4b4366fa387bf..350bc3f97796a 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -365,7 +365,7 @@ def test_escapechar(all_parsers): data = '''SEARCH_TERM,ACTUAL_URL "bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" -"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa: E501 +"SLAGBORD, \\"Bergslagen\\", IKEA:s 
1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' parser = all_parsers result = parser.read_csv( diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index e78b042a09231..00dc184a0ac4d 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -339,8 +339,8 @@ def test_hash_collisions(): # # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726 hashes = [ - "Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa: E501 - "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe", # noqa: E501 + 
"Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", + "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe", ] # These should be different. 
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 4c2122c3fdff1..4e8189e72c427 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -371,7 +371,7 @@ def decorator(decorated: F) -> F: continue if hasattr(docstring, "_docstring_components"): docstring_components.extend( - docstring._docstring_components # pyright: ignore[reportGeneralTypeIssues] # noqa: E501 + docstring._docstring_components # pyright: ignore[reportGeneralTypeIssues] ) elif isinstance(docstring, str) or docstring.__doc__: docstring_components.append(docstring) From a693c1fb15b91e9974161a2edb8ae8d19f0623f3 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:10:57 -0800 Subject: [PATCH 3/6] Bump black in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 85bb937fe431f..063a69213f4a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -188,7 +188,7 @@ environment = {CFLAGS="-g0"} [tool.black] target-version = ['py39', 'py310'] -required-version = '23.9.1' +required-version = '23.10.1' exclude = ''' ( asv_bench/env From 02c3966b9b475f266e46a4d85892e5aa7f7e3063 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:13:12 -0800 Subject: [PATCH 4/6] Remove unneeded noqa --- pandas/_testing/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index 17d67f05b6a68..832919db442d4 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -1059,14 +1059,14 @@ def shares_memory(left, right) -> bool: if ( isinstance(left, ExtensionArray) and is_string_dtype(left.dtype) - and left.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] # noqa: E501 + and left.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: 
ignore[attr-defined] ): # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669 left = cast("ArrowExtensionArray", left) if ( isinstance(right, ExtensionArray) and is_string_dtype(right.dtype) - and right.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] # noqa: E501 + and right.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] ): right = cast("ArrowExtensionArray", right) left_pa_data = left._pa_array From 268b4540ebeb5a2e1144dd43368d9bdd1580fea7 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:17:43 -0800 Subject: [PATCH 5/6] Manually codespelled --- doc/source/development/debugging_extensions.rst | 2 +- doc/source/development/extending.rst | 2 +- doc/source/user_guide/merging.rst | 2 +- doc/source/user_guide/reshaping.rst | 2 +- doc/source/whatsnew/v1.0.0.rst | 2 +- .../include/pandas/vendored/ujson/lib/ultrajson.h | 4 ++-- pandas/_libs/tslibs/parsing.pyx | 2 +- pandas/_libs/tslibs/timedeltas.pyx | 2 +- pandas/core/arrays/base.py | 10 +++++----- pandas/core/arrays/interval.py | 2 +- pandas/core/groupby/groupby.py | 4 ++-- pandas/core/indexing.py | 2 +- pandas/core/internals/blocks.py | 2 +- pandas/core/internals/construction.py | 2 +- pandas/core/reshape/reshape.py | 2 +- pandas/io/formats/style.py | 2 +- pandas/tests/extension/base/ops.py | 2 +- pandas/tests/frame/constructors/test_from_records.py | 2 +- pandas/tests/groupby/test_timegrouper.py | 2 +- pandas/tests/indexing/test_categorical.py | 2 +- pandas/tests/plotting/test_misc.py | 4 ++-- pandas/tests/reshape/concat/test_categorical.py | 2 +- pandas/tests/series/methods/test_infer_objects.py | 2 +- pyproject.toml | 2 +- 24 files changed, 31 insertions(+), 31 deletions(-) diff --git a/doc/source/development/debugging_extensions.rst b/doc/source/development/debugging_extensions.rst index 63154369dfd88..d63ecb3157cff 100644 --- 
a/doc/source/development/debugging_extensions.rst +++ b/doc/source/development/debugging_extensions.rst @@ -23,7 +23,7 @@ By default building pandas from source will generate a release build. To generat .. note:: - conda environements update CFLAGS/CPPFLAGS with flags that are geared towards generating releases. If using conda, you may need to set ``CFLAGS="$CFLAGS -O0"`` and ``CPPFLAGS="$CPPFLAGS -O0"`` to ensure optimizations are turned off for debugging + conda environments update CFLAGS/CPPFLAGS with flags that are geared towards generating releases. If using conda, you may need to set ``CFLAGS="$CFLAGS -O0"`` and ``CPPFLAGS="$CPPFLAGS -O0"`` to ensure optimizations are turned off for debugging By specifying ``builddir="debug"`` all of the targets will be built and placed in the debug directory relative to the project root. This helps to keep your debug and release artifacts separate; you are of course able to choose a different directory name or omit altogether if you do not care to separate build types. diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst index f74eacb6b861d..e67829b8805eb 100644 --- a/doc/source/development/extending.rst +++ b/doc/source/development/extending.rst @@ -99,7 +99,7 @@ The interface consists of two classes. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A :class:`pandas.api.extensions.ExtensionDtype` is similar to a ``numpy.dtype`` object. It describes the -data type. Implementors are responsible for a few unique items like the name. +data type. Implementers are responsible for a few unique items like the name. One particularly important item is the ``type`` property. This should be the class that is the scalar type for your data. 
For example, if you were writing an diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst index 3e0e3245e8d64..c9c8478a719f0 100644 --- a/doc/source/user_guide/merging.rst +++ b/doc/source/user_guide/merging.rst @@ -525,7 +525,7 @@ Performing an outer join with duplicate join keys in :class:`DataFrame` .. warning:: - Merging on duplicate keys sigificantly increase the dimensions of the result + Merging on duplicate keys significantly increase the dimensions of the result and can cause a memory overflow. .. _merging.validation: diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index 83bf453b560ec..a35cfb396f1f6 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -480,7 +480,7 @@ The values can be cast to a different type using the ``dtype`` argument. .. versionadded:: 1.5.0 -:func:`~pandas.from_dummies` coverts the output of :func:`~pandas.get_dummies` back into +:func:`~pandas.from_dummies` converts the output of :func:`~pandas.get_dummies` back into a :class:`Series` of categorical values from indicator values. .. 
ipython:: python
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index f1302b639647b..ae9868e6a828f 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -1079,7 +1079,7 @@ Datetimelike
 - Bug in masking datetime-like arrays with a boolean mask of an incorrect length not raising an ``IndexError`` (:issue:`30308`)
 - Bug in :attr:`Timestamp.resolution` being a property instead of a class attribute (:issue:`29910`)
 - Bug in :func:`pandas.to_datetime` when called with ``None`` raising ``TypeError`` instead of returning ``NaT`` (:issue:`30011`)
-- Bug in :func:`pandas.to_datetime` failing for ``deques`` when using ``cache=True`` (the default) (:issue:`29403`)
+- Bug in :func:`pandas.to_datetime` failing for ``deques`` when using ``cache=True`` (the default) (:issue:`29403`)
 - Bug in :meth:`Series.item` with ``datetime64`` or ``timedelta64`` dtype, :meth:`DatetimeIndex.item`, and :meth:`TimedeltaIndex.item` returning an integer instead of a :class:`Timestamp` or :class:`Timedelta` (:issue:`30175`)
 - Bug in :class:`DatetimeIndex` addition when adding a non-optimized :class:`DateOffset` incorrectly dropping timezone information (:issue:`30336`)
 - Bug in :meth:`DataFrame.drop` where attempting to drop non-existent values from a DatetimeIndex would yield a confusing error message (:issue:`30399`)
diff --git a/pandas/_libs/include/pandas/vendored/ujson/lib/ultrajson.h b/pandas/_libs/include/pandas/vendored/ujson/lib/ultrajson.h
index d60335fbaee4d..0d62bb0ba915c 100644
--- a/pandas/_libs/include/pandas/vendored/ujson/lib/ultrajson.h
+++ b/pandas/_libs/include/pandas/vendored/ujson/lib/ultrajson.h
@@ -189,13 +189,13 @@ typedef struct __JSONObjectEncoder {
 
   /*
   Begin iteration of an iterable object (JS_ARRAY or JS_OBJECT)
-  Implementor should setup iteration state in ti->prv
+  Implementer should setup iteration state in ti->prv
   */
   JSPFN_ITERBEGIN iterBegin;
 
   /*
   Retrieve next object in an iteration.
Should return 0 to indicate iteration - has reached end or 1 if there are more items. Implementor is responsible for + has reached end or 1 if there are more items. Implementer is responsible for keeping state of the iteration. Use ti->prv fields for this */ JSPFN_ITERNEXT iterNext; diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index bfdd6c58432fa..9590d7511891f 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -950,7 +950,7 @@ def guess_datetime_format(dt_str: str, bint dayfirst=False) -> str | None: # the offset is separated into two tokens, ex. ['+', '0900’]. # This separation will prevent subsequent processing # from correctly parsing the time zone format. - # So in addition to the format nomalization, we rejoin them here. + # So in addition to the format normalization, we rejoin them here. try: tokens[offset_index] = parsed_datetime.strftime("%z") except ValueError: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index e67c0fd91cd6f..0a9bd2ac2770b 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1229,7 +1229,7 @@ cdef class _Timedelta(timedelta): return cmp_scalar(self._value, ots._value, op) return self._compare_mismatched_resos(ots, op) - # TODO: re-use/share with Timestamp + # TODO: reuse/share with Timestamp cdef bint _compare_mismatched_resos(self, _Timedelta other, op): # Can't just dispatch to numpy as they silently overflow and get it wrong cdef: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 3d97711d5f8c3..e61e374009163 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -455,7 +455,7 @@ def __setitem__(self, key, value) -> None: ------- None """ - # Some notes to the ExtensionArray implementor who may have ended up + # Some notes to the ExtensionArray implementer who may have ended up # here. 
While this method is not required for the interface, if you # *do* choose to implement __setitem__, then some semantics should be # observed: @@ -775,7 +775,7 @@ def _values_for_argsort(self) -> np.ndarray: Notes ----- The caller is responsible for *not* modifying these values in-place, so - it is safe for implementors to give views on ``self``. + it is safe for implementers to give views on ``self``. Functions that use this (e.g. ``ExtensionArray.argsort``) should ignore entries with missing values in the original array (according to @@ -833,7 +833,7 @@ def argsort( >>> arr.argsort() array([1, 2, 0, 4, 3]) """ - # Implementor note: You have two places to override the behavior of + # Implementer note: You have two places to override the behavior of # argsort. # 1. _values_for_argsort : construct the values passed to np.argsort # 2. argsort : total control over sorting. In case of overriding this, @@ -874,7 +874,7 @@ def argmin(self, skipna: bool = True) -> int: >>> arr.argmin() 1 """ - # Implementor note: You have two places to override the behavior of + # Implementer note: You have two places to override the behavior of # argmin. # 1. _values_for_argsort : construct the values used in nargminmax # 2. argmin itself : total control over sorting. @@ -908,7 +908,7 @@ def argmax(self, skipna: bool = True) -> int: >>> arr.argmax() 3 """ - # Implementor note: You have two places to override the behavior of + # Implementer note: You have two places to override the behavior of # argmax. # 1. _values_for_argsort : construct the values used in nargminmax # 2. argmax itself : total control over sorting. 
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 28ee6b2476b0d..91960173e7c1e 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -850,7 +850,7 @@ def argsort( ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) if ascending and kind == "quicksort" and na_position == "last": - # TODO: in an IntervalIndex we can re-use the cached + # TODO: in an IntervalIndex we can reuse the cached # IntervalTree.left_sorter return np.lexsort((self.right, self.left)) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 14e3decd12185..3412f18a40313 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -962,7 +962,7 @@ def _selected_obj(self): return self.obj[self._selection] # Otherwise _selection is equivalent to _selection_list, so - # _selected_obj matches _obj_with_exclusions, so we can re-use + # _selected_obj matches _obj_with_exclusions, so we can reuse # that and avoid making a copy. return self._obj_with_exclusions @@ -1466,7 +1466,7 @@ def _concat_objects( # when the ax has duplicates # so we resort to this # GH 14776, 30667 - # TODO: can we re-use e.g. _reindex_non_unique? + # TODO: can we reuse e.g. _reindex_non_unique? if ax.has_duplicates and not result.axes[self.axis].equals(ax): # e.g. test_category_order_transformer target = algorithms.unique1d(ax._values) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index f3a130672a2e5..d881bc3cd041c 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -985,7 +985,7 @@ def _getitem_tuple_same_dim(self, tup: tuple): This is only called after a failed call to _getitem_lowerdim. 
""" retval = self.obj - # Selecting columns before rows is signficiantly faster + # Selecting columns before rows is significantly faster start_val = (self.ndim - len(tup)) + 1 for i, key in enumerate(reversed(tup)): i = self.ndim - i - start_val diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 83dd9f90a698f..a3c2ede55dabf 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1415,7 +1415,7 @@ def where( try: # try/except here is equivalent to a self._can_hold_element check, - # but this gets us back 'casted' which we will re-use below; + # but this gets us back 'casted' which we will reuse below; # without using 'casted', expressions.where may do unwanted upcasts. casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index d6aeda3d418ed..57dd310f6b12c 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -550,7 +550,7 @@ def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray: if len(values) == 0: # TODO: check for length-zero range, in which case return int64 dtype? - # TODO: re-use anything in try_cast? + # TODO: reuse anything in try_cast? 
return np.empty((0, 0), dtype=object) elif isinstance(values, range): arr = range_to_ndarray(values) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index bf7c7a1ee4dc7..d6922ba58d2b9 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -222,7 +222,7 @@ def mask_all(self) -> bool: @cache_readonly def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]: - # We cache this for re-use in ExtensionBlock._unstack + # We cache this for reuse in ExtensionBlock._unstack dummy_arr = np.arange(len(self.index), dtype=np.intp) new_values, mask = self.get_new_values(dummy_arr, fill_value=-1) return new_values, mask.any(0) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f883d9de246ab..39d5b45862a8f 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -2722,7 +2722,7 @@ def hide( - Boolean - ValueError: cannot supply ``subset`` and ``level`` simultaneously. - Note this method only hides the identifed elements so can be chained to hide + Note this method only hides the identified elements so can be chained to hide multiple elements in sequence. 
Examples diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index d2aa4bd63c428..40fab5ec11d7d 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -259,7 +259,7 @@ def test_invert(self, data): with pytest.raises(TypeError): ~data else: - # Note we do not re-use the pointwise result to construct expected + # Note we do not reuse the pointwise result to construct expected # because python semantics for negating bools are weird see GH#54569 result = ~ser expected = pd.Series(~data, name="name") diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index bb4aed2163dac..4ad4e29550d56 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -110,7 +110,7 @@ def test_from_records_sequencelike(self): columns=df.columns ) - # list of tupels (no dtype info) + # list of tuples (no dtype info) result4 = DataFrame.from_records(lists, columns=columns).reindex( columns=df.columns ) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 48c51cdfab4e4..bd0a447d7b19f 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -30,7 +30,7 @@ def frame_for_truncated_bingrouper(): """ DataFrame used by groupby_with_truncated_bingrouper, made into - a separate fixture for easier re-use in + a separate fixture for easier reuse in test_groupby_apply_timegrouper_with_nat_apply_squeeze """ df = DataFrame( diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index d3a6d4bf7cebf..0432c8856e5c5 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -91,7 +91,7 @@ def test_loc_setitem_with_expansion_non_category(self, df): ) tm.assert_frame_equal(df3, expected3) - # Settig a new row _and_ new column + 
# Setting a new row _and_ new column df4 = df.copy() df4.loc["d", "C"] = 10 expected3 = DataFrame( diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index a5145472203a3..f0d3d66eff462 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -517,9 +517,9 @@ def test_barh_plot_labels_mixed_integer_string(self): # Test barh plot with string and integer at the same column from matplotlib.text import Text - df = DataFrame([{"word": 1, "value": 0}, {"word": "knowledg", "value": 2}]) + df = DataFrame([{"word": 1, "value": 0}, {"word": "knowledge", "value": 2}]) plot_barh = df.plot.barh(x="word", legend=None) - expected_yticklabels = [Text(0, 0, "1"), Text(0, 1, "knowledg")] + expected_yticklabels = [Text(0, 0, "1"), Text(0, 1, "knowledge")] assert all( actual.get_text() == expected.get_text() for actual, expected in zip( diff --git a/pandas/tests/reshape/concat/test_categorical.py b/pandas/tests/reshape/concat/test_categorical.py index 2730b2ffcc4e3..7acd0ff4f4c56 100644 --- a/pandas/tests/reshape/concat/test_categorical.py +++ b/pandas/tests/reshape/concat/test_categorical.py @@ -220,7 +220,7 @@ def test_categorical_concat_gh7864(self): def test_categorical_index_upcast(self): # GH 17629 - # test upcasting to object when concatinating on categorical indexes + # test upcasting to object when concatenating on categorical indexes # with non-identical categories a = DataFrame({"foo": [1, 2]}, index=Categorical(["foo", "bar"])) diff --git a/pandas/tests/series/methods/test_infer_objects.py b/pandas/tests/series/methods/test_infer_objects.py index 25bff46d682be..29abac6b3780e 100644 --- a/pandas/tests/series/methods/test_infer_objects.py +++ b/pandas/tests/series/methods/test_infer_objects.py @@ -31,7 +31,7 @@ def test_infer_objects_series(self, index_or_series): expected = index_or_series([1.0, 2.0, 3.0, np.nan]) tm.assert_equal(actual, expected) - # only soft conversions, unconvertable pass thru unchanged 
+ # only soft conversions, unconvertible pass thru unchanged obj = index_or_series(np.array([1, 2, 3, None, "a"], dtype="O")) actual = obj.infer_objects() diff --git a/pyproject.toml b/pyproject.toml index 063a69213f4a7..10007af5e455f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -790,5 +790,5 @@ exclude_lines = [ directory = "coverage_html_report" [tool.codespell] -ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere" +ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere, Amde, expec, expecs" ignore-regex = 'https://([\w/\.])+' From b34d97283595f940d3abd4ec167e449c375cd2d7 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Date: Mon, 6 Nov 2023 15:28:43 -0800 Subject: [PATCH 6/6] Remove 404 link --- doc/source/getting_started/tutorials.rst | 1 - pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst index 1220c915c3cbc..4393c3716bdad 100644 --- a/doc/source/getting_started/tutorials.rst +++ b/doc/source/getting_started/tutorials.rst @@ -115,7 +115,6 @@ Various tutorials * `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 `_ * `Financial analysis in Python, by Thomas Wiecki `_ * `Intro to pandas data structures, by Greg Reda `_ -* `Pandas and Python: Top 10, by Manish Amde `_ * `Pandas DataFrames Tutorial, by Karlijn Willems `_ * `A concise tutorial with real life examples `_ * `430+ Searchable Pandas recipes by Isshin Inada `_ diff --git a/pyproject.toml b/pyproject.toml index 10007af5e455f..c501d91796e45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -790,5 +790,5 @@ exclude_lines = [ directory = "coverage_html_report" [tool.codespell] -ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere, Amde, expec, expecs" +ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere, expec, 
expecs" ignore-regex = 'https://([\w/\.])+'