Skip to content

Commit 2a3b05a

Browse files
StanczakDominik authored and jreback committed
CLN/INT: Rename _possibly to _maybe (GH15764)
Also rename "private" functions in pandas.type.cast closes #15764 Author: Dominik Stanczak <[email protected]> Closes #15771 from StanczakDominik/rename-possibly and squashes the following commits: 486b932 [Dominik Stanczak] Cleanup missed linting errors 188c48b [Dominik Stanczak] CLN/INT: Rename _possibly to _maybe
1 parent bc1235e commit 2a3b05a

22 files changed

+228
-231
lines changed

pandas/computation/expr.py

+17-17
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,7 @@ def _rewrite_membership_op(self, node, left, right):
348348
op = self.visit(op_instance)
349349
return op, op_instance, left, right
350350

351-
def _possibly_transform_eq_ne(self, node, left=None, right=None):
351+
def _maybe_transform_eq_ne(self, node, left=None, right=None):
352352
if left is None:
353353
left = self.visit(node.left, side='left')
354354
if right is None:
@@ -357,7 +357,7 @@ def _possibly_transform_eq_ne(self, node, left=None, right=None):
357357
right)
358358
return op, op_class, left, right
359359

360-
def _possibly_downcast_constants(self, left, right):
360+
def _maybe_downcast_constants(self, left, right):
361361
f32 = np.dtype(np.float32)
362362
if left.isscalar and not right.isscalar and right.return_type == f32:
363363
# right is a float32 array, left is a scalar
@@ -370,7 +370,7 @@ def _possibly_downcast_constants(self, left, right):
370370

371371
return left, right
372372

373-
def _possibly_eval(self, binop, eval_in_python):
373+
def _maybe_eval(self, binop, eval_in_python):
374374
# eval `in` and `not in` (for now) in "partial" python space
375375
# things that can be evaluated in "eval" space will be turned into
376376
# temporary variables. for example,
@@ -380,10 +380,10 @@ def _possibly_eval(self, binop, eval_in_python):
380380
return binop.evaluate(self.env, self.engine, self.parser,
381381
self.term_type, eval_in_python)
382382

383-
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
384-
eval_in_python=('in', 'not in'),
385-
maybe_eval_in_python=('==', '!=', '<', '>',
386-
'<=', '>=')):
383+
def _maybe_evaluate_binop(self, op, op_class, lhs, rhs,
384+
eval_in_python=('in', 'not in'),
385+
maybe_eval_in_python=('==', '!=', '<', '>',
386+
'<=', '>=')):
387387
res = op(lhs, rhs)
388388

389389
if res.has_invalid_return_type:
@@ -397,24 +397,24 @@ def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
397397
getattr(rhs, 'is_datetime', False)):
398398
# all date ops must be done in python bc numexpr doesn't work
399399
# well with NaT
400-
return self._possibly_eval(res, self.binary_ops)
400+
return self._maybe_eval(res, self.binary_ops)
401401

402402
if res.op in eval_in_python:
403403
# "in"/"not in" ops are always evaluated in python
404-
return self._possibly_eval(res, eval_in_python)
404+
return self._maybe_eval(res, eval_in_python)
405405
elif self.engine != 'pytables':
406406
if (getattr(lhs, 'return_type', None) == object or
407407
getattr(rhs, 'return_type', None) == object):
408408
# evaluate "==" and "!=" in python if either of our operands
409409
# has an object return type
410-
return self._possibly_eval(res, eval_in_python +
411-
maybe_eval_in_python)
410+
return self._maybe_eval(res, eval_in_python +
411+
maybe_eval_in_python)
412412
return res
413413

414414
def visit_BinOp(self, node, **kwargs):
415-
op, op_class, left, right = self._possibly_transform_eq_ne(node)
416-
left, right = self._possibly_downcast_constants(left, right)
417-
return self._possibly_evaluate_binop(op, op_class, left, right)
415+
op, op_class, left, right = self._maybe_transform_eq_ne(node)
416+
left, right = self._maybe_downcast_constants(left, right)
417+
return self._maybe_evaluate_binop(op, op_class, left, right)
418418

419419
def visit_Div(self, node, **kwargs):
420420
truediv = self.env.scope['truediv']
@@ -662,9 +662,9 @@ def visitor(x, y):
662662
lhs = self._try_visit_binop(x)
663663
rhs = self._try_visit_binop(y)
664664

665-
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
666-
rhs)
667-
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
665+
op, op_class, lhs, rhs = self._maybe_transform_eq_ne(
666+
node, lhs, rhs)
667+
return self._maybe_evaluate_binop(op, node.op, lhs, rhs)
668668

669669
operands = node.values
670670
return reduce(visitor, operands)

pandas/core/algorithms.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import numpy as np
88

99
from pandas import compat, _np_version_under1p8
10-
from pandas.types.cast import _maybe_promote
10+
from pandas.types.cast import maybe_promote
1111
from pandas.types.generic import ABCSeries, ABCIndex
1212
from pandas.types.common import (is_unsigned_integer_dtype,
1313
is_signed_integer_dtype,
@@ -1297,7 +1297,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
12971297
else:
12981298
# check for promotion based on types only (do this first because
12991299
# it's faster than computing a mask)
1300-
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
1300+
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
13011301
if dtype != arr.dtype and (out is None or out.dtype != dtype):
13021302
# check if promotion is actually required based on indexer
13031303
if mask_info is not None:
@@ -1380,7 +1380,7 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
13801380
else:
13811381
# check for promotion based on types only (do this first because
13821382
# it's faster than computing a mask)
1383-
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
1383+
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
13841384
if dtype != arr.dtype and (out is None or out.dtype != dtype):
13851385
# check if promotion is actually required based on indexer
13861386
if mask_info is not None:

pandas/core/categorical.py

+10-11
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,8 @@
1010

1111
from pandas.types.generic import ABCSeries, ABCIndexClass, ABCCategoricalIndex
1212
from pandas.types.missing import isnull, notnull
13-
from pandas.types.cast import (_possibly_infer_to_datetimelike,
14-
_coerce_indexer_dtype)
13+
from pandas.types.cast import (maybe_infer_to_datetimelike,
14+
coerce_indexer_dtype)
1515
from pandas.types.dtypes import CategoricalDtype
1616
from pandas.types.common import (_ensure_int64,
1717
_ensure_object,
@@ -237,7 +237,7 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False):
237237

238238
if fastpath:
239239
# fast path
240-
self._codes = _coerce_indexer_dtype(values, categories)
240+
self._codes = coerce_indexer_dtype(values, categories)
241241
self._categories = self._validate_categories(
242242
categories, fastpath=isinstance(categories, ABCIndexClass))
243243
self._ordered = ordered
@@ -266,8 +266,7 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False):
266266
# correctly no need here this is an issue because _sanitize_array
267267
# also coerces np.nan to a string under certain versions of numpy
268268
# as well
269-
values = _possibly_infer_to_datetimelike(values,
270-
convert_dates=True)
269+
values = maybe_infer_to_datetimelike(values, convert_dates=True)
271270
if not isinstance(values, np.ndarray):
272271
values = _convert_to_list_like(values)
273272
from pandas.core.series import _sanitize_array
@@ -324,7 +323,7 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False):
324323

325324
self.set_ordered(ordered or False, inplace=True)
326325
self._categories = categories
327-
self._codes = _coerce_indexer_dtype(codes, categories)
326+
self._codes = coerce_indexer_dtype(codes, categories)
328327

329328
@property
330329
def _constructor(self):
@@ -877,7 +876,7 @@ def add_categories(self, new_categories, inplace=False):
877876
new_categories = list(self._categories) + list(new_categories)
878877
cat = self if inplace else self.copy()
879878
cat._categories = self._validate_categories(new_categories)
880-
cat._codes = _coerce_indexer_dtype(cat._codes, new_categories)
879+
cat._codes = coerce_indexer_dtype(cat._codes, new_categories)
881880
if not inplace:
882881
return cat
883882

@@ -961,7 +960,7 @@ def remove_unused_categories(self, inplace=False):
961960
idx, inv = idx[1:], inv - 1
962961

963962
cat._categories = cat.categories.take(idx)
964-
cat._codes = _coerce_indexer_dtype(inv, self._categories)
963+
cat._codes = coerce_indexer_dtype(inv, self._categories)
965964

966965
if not inplace:
967966
return cat
@@ -1065,8 +1064,8 @@ def __setstate__(self, state):
10651064
state['_categories'] = self._validate_categories(state.pop(
10661065
'_levels'))
10671066
if '_codes' not in state and 'labels' in state:
1068-
state['_codes'] = _coerce_indexer_dtype(state.pop('labels'),
1069-
state['_categories'])
1067+
state['_codes'] = coerce_indexer_dtype(
1068+
state.pop('labels'), state['_categories'])
10701069

10711070
# 0.16.0 ordered change
10721071
if '_ordered' not in state:
@@ -2062,7 +2061,7 @@ def _get_codes_for_values(values, categories):
20622061
(_, _), cats = _get_data_algo(categories, _hashtables)
20632062
t = hash_klass(len(cats))
20642063
t.map_locations(cats)
2065-
return _coerce_indexer_dtype(t.lookup(vals), cats)
2064+
return coerce_indexer_dtype(t.lookup(vals), cats)
20662065

20672066

20682067
def _convert_to_list_like(list_like):

pandas/core/frame.py

+27-27
Original file line numberDiff line numberDiff line change
@@ -23,15 +23,15 @@
2323
import numpy as np
2424
import numpy.ma as ma
2525

26-
from pandas.types.cast import (_maybe_upcast, _infer_dtype_from_scalar,
27-
_possibly_cast_to_datetime,
28-
_possibly_infer_to_datetimelike,
29-
_possibly_convert_platform,
30-
_possibly_downcast_to_dtype,
31-
_invalidate_string_dtypes,
32-
_coerce_to_dtypes,
33-
_maybe_upcast_putmask,
34-
_find_common_type)
26+
from pandas.types.cast import (maybe_upcast, infer_dtype_from_scalar,
27+
maybe_cast_to_datetime,
28+
maybe_infer_to_datetimelike,
29+
maybe_convert_platform,
30+
maybe_downcast_to_dtype,
31+
invalidate_string_dtypes,
32+
coerce_to_dtypes,
33+
maybe_upcast_putmask,
34+
find_common_type)
3535
from pandas.types.common import (is_categorical_dtype,
3636
is_object_dtype,
3737
is_extension_type,
@@ -275,7 +275,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
275275
else:
276276
mask = ma.getmaskarray(data)
277277
if mask.any():
278-
data, fill_value = _maybe_upcast(data, copy=True)
278+
data, fill_value = maybe_upcast(data, copy=True)
279279
data[mask] = fill_value
280280
else:
281281
data = data.copy()
@@ -335,7 +335,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
335335
if isinstance(data, compat.string_types) and dtype is None:
336336
dtype = np.object_
337337
if dtype is None:
338-
dtype, data = _infer_dtype_from_scalar(data)
338+
dtype, data = infer_dtype_from_scalar(data)
339339

340340
values = np.empty((len(index), len(columns)), dtype=dtype)
341341
values.fill(data)
@@ -469,7 +469,7 @@ def _get_axes(N, K, index=index, columns=columns):
469469
# on the entire block; this is to convert if we have datetimelike's
470470
# embedded in an object type
471471
if dtype is None and is_object_dtype(values):
472-
values = _possibly_infer_to_datetimelike(values)
472+
values = maybe_infer_to_datetimelike(values)
473473

474474
return create_block_manager_from_blocks([values], [columns, index])
475475

@@ -2359,7 +2359,7 @@ def select_dtypes(self, include=None, exclude=None):
23592359
include, exclude = map(
23602360
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
23612361
for dtypes in (include, exclude):
2362-
_invalidate_string_dtypes(dtypes)
2362+
invalidate_string_dtypes(dtypes)
23632363

23642364
# can't both include AND exclude!
23652365
if not include.isdisjoint(exclude):
@@ -2659,7 +2659,7 @@ def reindexer(value):
26592659
value = _sanitize_index(value, self.index, copy=False)
26602660
if not isinstance(value, (np.ndarray, Index)):
26612661
if isinstance(value, list) and len(value) > 0:
2662-
value = _possibly_convert_platform(value)
2662+
value = maybe_convert_platform(value)
26632663
else:
26642664
value = com._asarray_tuplesafe(value)
26652665
elif value.ndim == 2:
@@ -2671,13 +2671,13 @@ def reindexer(value):
26712671

26722672
# possibly infer to datetimelike
26732673
if is_object_dtype(value.dtype):
2674-
value = _possibly_infer_to_datetimelike(value)
2674+
value = maybe_infer_to_datetimelike(value)
26752675

26762676
else:
26772677
# upcast the scalar
2678-
dtype, value = _infer_dtype_from_scalar(value)
2678+
dtype, value = infer_dtype_from_scalar(value)
26792679
value = np.repeat(value, len(self.index)).astype(dtype)
2680-
value = _possibly_cast_to_datetime(value, dtype)
2680+
value = maybe_cast_to_datetime(value, dtype)
26812681

26822682
# return internal types directly
26832683
if is_extension_type(value):
@@ -3000,8 +3000,8 @@ def _maybe_casted_values(index, labels=None):
30003000
else:
30013001
values = values.take(labels)
30023002
if mask.any():
3003-
values, changed = _maybe_upcast_putmask(values, mask,
3004-
np.nan)
3003+
values, changed = maybe_upcast_putmask(
3004+
values, mask, np.nan)
30053005
return values
30063006

30073007
new_index = _default_index(len(new_obj))
@@ -3722,7 +3722,7 @@ def combine(self, other, func, fill_value=None, overwrite=True):
37223722
# if we have different dtypes, possibily promote
37233723
new_dtype = this_dtype
37243724
if not is_dtype_equal(this_dtype, other_dtype):
3725-
new_dtype = _find_common_type([this_dtype, other_dtype])
3725+
new_dtype = find_common_type([this_dtype, other_dtype])
37263726
if not is_dtype_equal(this_dtype, new_dtype):
37273727
series = series.astype(new_dtype)
37283728
if not is_dtype_equal(other_dtype, new_dtype):
@@ -3743,13 +3743,13 @@ def combine(self, other, func, fill_value=None, overwrite=True):
37433743
# try to downcast back to the original dtype
37443744
if needs_i8_conversion_i:
37453745
# ToDo: This conversion should be handled in
3746-
# _possibly_cast_to_datetime but the change affects lot...
3746+
# _maybe_cast_to_datetime but the change affects lot...
37473747
if is_datetime64tz_dtype(new_dtype):
37483748
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
37493749
else:
3750-
arr = _possibly_cast_to_datetime(arr, new_dtype)
3750+
arr = maybe_cast_to_datetime(arr, new_dtype)
37513751
else:
3752-
arr = _possibly_downcast_to_dtype(arr, this_dtype)
3752+
arr = maybe_downcast_to_dtype(arr, this_dtype)
37533753

37543754
result[col] = arr
37553755

@@ -5003,7 +5003,7 @@ def f(x):
50035003

50045004
# try to coerce to the original dtypes item by item if we can
50055005
if axis == 0:
5006-
result = _coerce_to_dtypes(result, self.dtypes)
5006+
result = coerce_to_dtypes(result, self.dtypes)
50075007

50085008
return Series(result, index=labels)
50095009

@@ -5505,7 +5505,7 @@ def _prep_ndarray(values, copy=True):
55055505
return np.empty((0, 0), dtype=object)
55065506

55075507
def convert(v):
5508-
return _possibly_convert_platform(v)
5508+
return maybe_convert_platform(v)
55095509

55105510
# we could have a 1-dim or 2-dim list here
55115511
# this is equiv of np.asarray, but does object conversion
@@ -5601,7 +5601,7 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
56015601
for fv, arr, col in zip(fill_value, arrays, arr_columns):
56025602
mask = ma.getmaskarray(data[col])
56035603
if mask.any():
5604-
arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)
5604+
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
56055605
arr[mask] = fv
56065606
new_arrays.append(arr)
56075607

@@ -5699,7 +5699,7 @@ def _convert_object_array(content, columns, coerce_float=False, dtype=None):
56995699
def convert(arr):
57005700
if dtype != object and dtype != np.object:
57015701
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
5702-
arr = _possibly_cast_to_datetime(arr, dtype)
5702+
arr = maybe_cast_to_datetime(arr, dtype)
57035703
return arr
57045704

57055705
arrays = [convert(arr) for arr in content]

pandas/core/generic.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
is_list_like,
2424
is_dict_like,
2525
is_re_compilable)
26-
from pandas.types.cast import _maybe_promote, _maybe_upcast_putmask
26+
from pandas.types.cast import maybe_promote, maybe_upcast_putmask
2727
from pandas.types.missing import isnull, notnull
2828
from pandas.types.generic import ABCSeries, ABCPanel
2929

@@ -4956,10 +4956,10 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
49564956
# or not try_quick
49574957
if not try_quick:
49584958

4959-
dtype, fill_value = _maybe_promote(other.dtype)
4959+
dtype, fill_value = maybe_promote(other.dtype)
49604960
new_other = np.empty(len(icond), dtype=dtype)
49614961
new_other.fill(fill_value)
4962-
_maybe_upcast_putmask(new_other, icond, other)
4962+
maybe_upcast_putmask(new_other, icond, other)
49634963
other = new_other
49644964

49654965
else:

pandas/core/groupby.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232
_ensure_object,
3333
_ensure_categorical,
3434
_ensure_float)
35-
from pandas.types.cast import _possibly_downcast_to_dtype
35+
from pandas.types.cast import maybe_downcast_to_dtype
3636
from pandas.types.missing import isnull, notnull, _maybe_fill
3737

3838
from pandas.core.common import (_values_from_object, AbstractMethodError,
@@ -783,7 +783,7 @@ def _try_cast(self, result, obj, numeric_only=False):
783783

784784
if not is_scalar(result):
785785
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
786-
result = _possibly_downcast_to_dtype(result, dtype)
786+
result = maybe_downcast_to_dtype(result, dtype)
787787

788788
return result
789789

@@ -2914,7 +2914,7 @@ def transform(self, func, *args, **kwargs):
29142914
# the cython take a different path (and casting)
29152915
dtype = self._selected_obj.dtype
29162916
if is_numeric_dtype(dtype):
2917-
result = _possibly_downcast_to_dtype(result, dtype)
2917+
result = maybe_downcast_to_dtype(result, dtype)
29182918

29192919
result.name = self._selected_obj.name
29202920
result.index = self._selected_obj.index

0 commit comments

Comments
 (0)