# numeric.py
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Literal,
)

import numpy as np

from pandas._libs import lib
from pandas.util._validators import check_dtype_backend

from pandas.core.dtypes.cast import maybe_downcast_numeric
from pandas.core.dtypes.common import (
    ensure_object,
    is_bool_dtype,
    is_decimal,
    is_integer_dtype,
    is_number,
    is_numeric_dtype,
    is_scalar,
    is_string_dtype,
    needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ArrowDtype
from pandas.core.dtypes.generic import (
    ABCIndex,
    ABCSeries,
)

from pandas.core.arrays import BaseMaskedArray
from pandas.core.arrays.string_ import StringDtype

if TYPE_CHECKING:
    from pandas._typing import (
        DateTimeErrorChoices,
        DtypeBackend,
        npt,
    )


def to_numeric(
    arg,
    errors: DateTimeErrorChoices = "raise",
    downcast: Literal["integer", "signed", "unsigned", "float"] | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can be stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
- If 'ignore', then invalid parsing will return the input.
downcast : str, default None
Can be 'integer', 'signed', 'unsigned', or 'float'.
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
ret
Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
Downcasting of nullable integer and floating dtypes is supported:
>>> s = pd.Series([1, 2, 3], dtype="Int64")
>>> pd.to_numeric(s, downcast="integer")
0 1
1 2
2 3
dtype: Int8
>>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
>>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.1
2 3.0
dtype: Float32
"""
    if downcast not in (None, "integer", "signed", "unsigned", "float"):
        raise ValueError("invalid downcasting method provided")

    if errors not in ("ignore", "raise", "coerce"):
        raise ValueError("invalid error value specified")

    check_dtype_backend(dtype_backend)
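
    # Work out which container type was passed in (Series, Index, scalar, or
    # array-like) so the converted values can be re-wrapped the same way on
    # the way out.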
    is_series = False
    is_index = False
    is_scalars = False

    if isinstance(arg, ABCSeries):
        is_series = True
        values = arg.values
    elif isinstance(arg, ABCIndex):
        is_index = True
        if needs_i8_conversion(arg.dtype):
            values = arg.view("i8")
        else:
            values = arg.values
    elif isinstance(arg, (list, tuple)):
        values = np.array(arg, dtype="O")
    elif is_scalar(arg):
        if is_decimal(arg):
            return float(arg)
        if is_number(arg):
            return arg
        is_scalars = True
        values = np.array([arg], dtype="O")
    elif getattr(arg, "ndim", 1) > 1:
        raise TypeError("arg must be a list, tuple, 1-d array, or Series")
    else:
        values = arg
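
    # Keep a reference to the unconverted input so that errors="ignore" can
    # hand it back unchanged if parsing fails below.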
    orig_values = values

    # GH33013: for IntegerArray & FloatingArray extract non-null values for casting
    # save mask to reconstruct the full array after casting
    mask: npt.NDArray[np.bool_] | None = None
    if isinstance(values, BaseMaskedArray):
        mask = values._mask
        values = values._data[~mask]

    values_dtype = getattr(values, "dtype", None)
    if isinstance(values_dtype, ArrowDtype):
        mask = values.isna()
        values = values.dropna().to_numpy()
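
    # Coerce non-numeric data to a numeric ndarray: datetime-like values are
    # viewed as int64, everything else goes through lib.maybe_convert_numeric,
    # which honours the errors= policy and can also produce a nullable mask.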
    new_mask: np.ndarray | None = None
    if is_numeric_dtype(values_dtype):
        pass
    elif lib.is_np_dtype(values_dtype, "mM"):
        values = values.view(np.int64)
    else:
        values = ensure_object(values)
        coerce_numeric = errors not in ("ignore", "raise")
        try:
            values, new_mask = lib.maybe_convert_numeric(  # type: ignore[call-overload]  # noqa: E501
                values,
                set(),
                coerce_numeric=coerce_numeric,
                convert_to_masked_nullable=dtype_backend is not lib.no_default
                or isinstance(values_dtype, StringDtype),
            )
        except (ValueError, TypeError):
            if errors == "raise":
                raise
            values = orig_values

    if new_mask is not None:
        # Remove unnecessary values, is expected later anyway and enables
        # downcasting
        values = values[~new_mask]
    elif (
        dtype_backend is not lib.no_default
        and new_mask is None
        or isinstance(values_dtype, StringDtype)
    ):
        new_mask = np.zeros(values.shape, dtype=np.bool_)
    # attempt downcast only if the data has been successfully converted
    # to a numerical dtype and if a downcast method has been specified
    if downcast is not None and is_numeric_dtype(values.dtype):
        typecodes: str | None = None
        if downcast in ("integer", "signed"):
            typecodes = np.typecodes["Integer"]
        elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
            typecodes = np.typecodes["UnsignedInteger"]
        elif downcast == "float":
            typecodes = np.typecodes["Float"]

            # pandas support goes only to np.float32,
            # as float dtypes smaller than that are
            # extremely rare and not well supported
            float_32_char = np.dtype(np.float32).char
            float_32_ind = typecodes.index(float_32_char)
            typecodes = typecodes[float_32_ind:]

        if typecodes is not None:
            # from smallest to largest
            for typecode in typecodes:
                dtype = np.dtype(typecode)
                if dtype.itemsize <= values.dtype.itemsize:
                    values = maybe_downcast_numeric(values, dtype)

                    # successful conversion
                    if values.dtype == dtype:
                        break
    # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct
    # masked array
    if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype):
        if mask is None or (new_mask is not None and new_mask.shape == mask.shape):
            # GH 52588
            mask = new_mask
        else:
            mask = mask.copy()
        assert isinstance(mask, np.ndarray)
        data = np.zeros(mask.shape, dtype=values.dtype)
        data[~mask] = values

        from pandas.core.arrays import (
            ArrowExtensionArray,
            BooleanArray,
            FloatingArray,
            IntegerArray,
        )

        klass: type[IntegerArray | BooleanArray | FloatingArray]
        if is_integer_dtype(data.dtype):
            klass = IntegerArray
        elif is_bool_dtype(data.dtype):
            klass = BooleanArray
        else:
            klass = FloatingArray
        values = klass(data, mask)
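
        # If the pyarrow backend was requested (or the input was already
        # Arrow-backed), hand the masked array over to ArrowExtensionArray.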
        if dtype_backend == "pyarrow" or isinstance(values_dtype, ArrowDtype):
            values = ArrowExtensionArray(values.__arrow_array__())
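
    # Finally, re-wrap the converted values in the same container type as the
    # input.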
    if is_series:
        return arg._constructor(values, index=arg.index, name=arg.name)
    elif is_index:
        # because we want to coerce to numeric if possible,
        # do not use _shallow_copy
        from pandas import Index

        return Index(values, name=arg.name)
    elif is_scalars:
        return values[0]
    else:
        return values
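

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# doctest-style demonstration of how the keyword arguments documented above
# interact, assuming pandas is importable as ``pd``. Outputs follow the
# docstring examples and standard pandas behaviour.
#
# >>> import pandas as pd
# >>> pd.to_numeric(pd.Series(["1", "2", "apple"]), errors="coerce")
# 0    1.0
# 1    2.0
# 2    NaN
# dtype: float64
# >>> pd.to_numeric(["1", "2", "3"], downcast="unsigned")
# array([1, 2, 3], dtype=uint8)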