Skip to content

Commit 9ac3eca

Browse files
committed
drive-by walruses
1 parent de6aefe commit 9ac3eca

File tree

12 files changed

+17
-32
lines changed

12 files changed

+17
-32
lines changed

pandas/_libs/tslibs/period.pyx

+2-2
Original file line numberDiff line numberDiff line change
@@ -1910,7 +1910,7 @@ cdef class _Period(PeriodMixin):
19101910

19111911
Parameters
19121912
----------
1913-
freq : str, BaseOffset
1913+
freq : str, DateOffset
19141914
The target frequency to convert the Period object to.
19151915
If a string is provided,
19161916
it must be a valid :ref:`period alias <timeseries.period_aliases>`.
@@ -2596,7 +2596,7 @@ cdef class _Period(PeriodMixin):
25962596
25972597
Parameters
25982598
----------
2599-
freq : str, BaseOffset
2599+
freq : str, DateOffset
26002600
Frequency to use for the returned period.
26012601
26022602
See Also

pandas/core/arrays/categorical.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -2974,8 +2974,7 @@ def _delegate_method(self, name: str, *args, **kwargs):
29742974
from pandas import Series
29752975

29762976
method = getattr(self._parent, name)
2977-
res = method(*args, **kwargs)
2978-
if res is not None:
2977+
if (res := method(*args, **kwargs)) is not None:
29792978
return Series(res, index=self._index, name=self._name)
29802979

29812980

pandas/core/base.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -126,8 +126,7 @@ def __sizeof__(self) -> int:
126126
Generates the total memory usage for an object that returns
127127
either a value or Series of values
128128
"""
129-
memory_usage = getattr(self, "memory_usage", None)
130-
if memory_usage:
129+
if memory_usage := getattr(self, "memory_usage", None):
131130
mem = memory_usage(deep=True)
132131
return int(mem if is_scalar(mem) else mem.sum())
133132

pandas/core/computation/engines.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,8 @@ def _check_ne_builtin_clash(expr: Expr) -> None:
3636
Terms can contain
3737
"""
3838
names = expr.names
39-
overlap = names & _ne_builtins
4039

41-
if overlap:
40+
if overlap := names & _ne_builtins:
4241
s = ", ".join([repr(x) for x in overlap])
4342
raise NumExprClobberingError(
4443
f'Variables in expression "{expr}" overlap with builtins: ({s})'

pandas/core/dtypes/common.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1819,8 +1819,7 @@ def pandas_dtype(dtype) -> DtypeObj:
18191819
return StringDtype(na_value=np.nan)
18201820

18211821
# registered extension types
1822-
result = registry.find(dtype)
1823-
if result is not None:
1822+
if (result := registry.find(dtype)) is not None:
18241823
if isinstance(result, type):
18251824
# GH 31356, GH 54592
18261825
warnings.warn(

pandas/core/dtypes/dtypes.py

+2-4
Original file line numberDiff line numberDiff line change
@@ -889,8 +889,7 @@ def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
889889
)
890890

891891
msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
892-
match = cls._match.match(string)
893-
if match:
892+
if match := cls._match.match(string):
894893
d = match.groupdict()
895894
try:
896895
return cls(unit=d["unit"], tz=d["tz"])
@@ -1999,9 +1998,8 @@ def _parse_subtype(dtype: str) -> tuple[str, bool]:
19991998
When the subtype cannot be extracted.
20001999
"""
20012000
xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
2002-
m = xpr.match(dtype)
20032001
has_fill_value = False
2004-
if m:
2002+
if m := xpr.match(dtype):
20052003
subtype = m.groupdict()["subtype"]
20062004
has_fill_value = bool(m.groupdict()["fill_value"])
20072005
elif dtype == "Sparse":

pandas/core/groupby/grouper.py

+2-4
Original file line numberDiff line numberDiff line change
@@ -460,8 +460,7 @@ def __init__(
460460
# we have a single grouper which may be a myriad of things,
461461
# some of which are dependent on the passing in level
462462

463-
ilevel = self._ilevel
464-
if ilevel is not None:
463+
if (ilevel := self._ilevel) is not None:
465464
# In extant tests, the new self.grouping_vector matches
466465
# `index.get_level_values(ilevel)` whenever
467466
# mapper is None and isinstance(index, MultiIndex)
@@ -547,8 +546,7 @@ def _passed_categorical(self) -> bool:
547546

548547
@cache_readonly
549548
def name(self) -> Hashable:
550-
ilevel = self._ilevel
551-
if ilevel is not None:
549+
if (ilevel := self._ilevel) is not None:
552550
return self._index.names[ilevel]
553551

554552
if isinstance(self._orig_grouper, (Index, Series)):

pandas/core/groupby/ops.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -695,9 +695,8 @@ def size(self) -> Series:
695695
Compute group sizes.
696696
"""
697697
ids = self.ids
698-
ngroups = self.ngroups
699698
out: np.ndarray | list
700-
if ngroups:
699+
if ngroups := self.ngroups:
701700
out = np.bincount(ids[ids != -1], minlength=ngroups)
702701
else:
703702
out = []

pandas/core/indexes/base.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -6108,9 +6108,8 @@ def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
61086108

61096109
# Count missing values
61106110
missing_mask = indexer < 0
6111-
nmissing = missing_mask.sum()
61126111

6113-
if nmissing:
6112+
if nmissing := missing_mask.sum():
61146113
if nmissing == len(indexer):
61156114
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
61166115

pandas/core/interchange/from_dataframe.py

+3-6
Original file line numberDiff line numberDiff line change
@@ -144,8 +144,7 @@ def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True) -> pd.DataFrame:
144144
else:
145145
pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False)
146146

147-
index_obj = df.metadata.get("pandas.index", None)
148-
if index_obj is not None:
147+
if (index_obj := df.metadata.get("pandas.index", None)) is not None:
149148
pandas_df.index = index_obj
150149

151150
return pandas_df
@@ -372,8 +371,7 @@ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
372371
def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray:
373372
"""Parse datetime `format_str` to interpret the `data`."""
374373
# timestamp 'ts{unit}:tz'
375-
timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)
376-
if timestamp_meta:
374+
if timestamp_meta := re.match(r"ts([smun]):(.*)", format_str):
377375
unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)
378376
if unit != "s":
379377
# the format string describes only a first letter of the unit, so
@@ -386,8 +384,7 @@ def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray:
386384
return data
387385

388386
# date 'td{Days/Ms}'
389-
date_meta = re.match(r"td([Dm])", format_str)
390-
if date_meta:
387+
if date_meta := re.match(r"td([Dm])", format_str):
391388
unit = date_meta.group(1)
392389
if unit == "D":
393390
# NumPy doesn't support DAY unit, so converting days to seconds

pandas/core/strings/object_array.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -530,8 +530,7 @@ def g(x):
530530
def f(x):
531531
if not isinstance(x, str):
532532
return empty_row
533-
m = regex.search(x)
534-
if m:
533+
if m := regex.search(x):
535534
return [na_value if item is None else item for item in m.groups()]
536535
else:
537536
return empty_row

pandas/core/window/rolling.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -741,8 +741,7 @@ def _apply(
741741
codes = self._grouper.codes
742742
levels = copy.copy(self._grouper.levels)
743743

744-
group_indices = self._grouper.indices.values()
745-
if group_indices:
744+
if group_indices := self._grouper.indices.values():
746745
indexer = np.concatenate(list(group_indices))
747746
else:
748747
indexer = np.array([], dtype=np.intp)

0 commit comments

Comments (0)