diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 031c74b1cc367..ca6c38e59597b 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -109,6 +109,54 @@ Beginning with this version, the default is now to use the more accurate parser ``floating_precision="legacy"`` to use the legacy parser. The change to using the higher precision parser by default should have no impact on performance. (:issue:`17154`) +.. _whatsnew_120.floating: + +Experimental nullable data types for float data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We've added :class:`Float32Dtype` / :class:`Float64Dtype` and :class:`~arrays.FloatingArray`, +extension types dedicated to floating point data that can hold the +``pd.NA`` missing value indicator (:issue:`32265`, :issue:`34307`). + +While the default float data type already supports missing values using ``np.nan``, +these new data types use ``pd.NA`` (and its corresponding behaviour) as the missing +value indicator, in line with the already existing nullable :ref:`integer <integer_na>` +and :ref:`boolean <boolean>` data types. + +One example where the behaviour of ``np.nan`` and ``pd.NA`` is different is +comparison operations: + +.. ipython:: python + + # the default numpy float64 dtype + s1 = pd.Series([1.5, None]) + s1 + s1 > 1 + +.. ipython:: python + + # the new nullable float64 dtype + s2 = pd.Series([1.5, None], dtype="Float64") + s2 + s2 > 1 + +See the :ref:`missing_data.NA` doc section for more details on the behaviour +when using the ``pd.NA`` missing value indicator. + +As shown above, the dtype can be specified using the "Float64" or "Float32" +string (capitalized to distinguish it from the default "float64" data type). +Alternatively, you can also use the dtype object: + +.. ipython:: python + + pd.Series([1.5, None], dtype=pd.Float32Dtype()) + +.. warning:: + + Experimental: the new floating data types are currently experimental, and their + behaviour or API may still change without warning. Especially the behaviour + regarding NaN (distinct from NA missing values) is subject to change. +
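+As a small additional sketch (based on the :func:`pandas.array` changes further down
+in this patch), float data passed to :func:`pandas.array` without an explicit dtype
+should now be inferred as the nullable ``Float64`` dtype:
+
+.. ipython:: python
+
+    pd.array([0.1, None, 0.3])
+
..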
_whatsnew_120.enhancements.other: Other enhancements diff --git a/pandas/__init__.py b/pandas/__init__.py index 70bb0c8a2cb51..cf7ae2505b72d 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -58,6 +58,8 @@ UInt16Dtype, UInt32Dtype, UInt64Dtype, + Float32Dtype, + Float64Dtype, CategoricalDtype, PeriodDtype, IntervalDtype, diff --git a/pandas/_testing.py b/pandas/_testing.py index 3e3ba480ebfeb..78b6b3c4f9072 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -84,6 +84,7 @@ ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"] +FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"] COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"] STRING_DTYPES: List[Dtype] = [str, "str", "U"] diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py index 61832a8b6d621..0fa070b6e4fc4 100644 --- a/pandas/arrays/__init__.py +++ b/pandas/arrays/__init__.py @@ -7,6 +7,7 @@ BooleanArray, Categorical, DatetimeArray, + FloatingArray, IntegerArray, IntervalArray, PandasArray, @@ -20,6 +21,7 @@ "BooleanArray", "Categorical", "DatetimeArray", + "FloatingArray", "IntegerArray", "IntervalArray", "PandasArray", diff --git a/pandas/conftest.py b/pandas/conftest.py index 604815d496f80..79e6eb42dfebf 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -984,6 +984,17 @@ def float_dtype(request): return request.param +@pytest.fixture(params=tm.FLOAT_EA_DTYPES) +def float_ea_dtype(request): + """ + Parameterized fixture for float dtypes. + + * 'Float32' + * 'Float64' + """ + return request.param + + @pytest.fixture(params=tm.COMPLEX_DTYPES) def complex_dtype(request): """ diff --git a/pandas/core/api.py b/pandas/core/api.py index 348e9206d6e19..67e86c2076329 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -14,6 +14,7 @@ from pandas.core.algorithms import factorize, unique, value_counts from pandas.core.arrays import Categorical from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.floating import Float32Dtype, Float64Dtype from pandas.core.arrays.integer import ( Int8Dtype, Int16Dtype, diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 1d538824e6d82..e5258a6aecd30 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -6,8 +6,10 @@ from pandas.core.arrays.boolean import BooleanArray from pandas.core.arrays.categorical import Categorical from pandas.core.arrays.datetimes import DatetimeArray +from pandas.core.arrays.floating import FloatingArray from pandas.core.arrays.integer import IntegerArray, integer_array from pandas.core.arrays.interval import IntervalArray +from pandas.core.arrays.masked import BaseMaskedArray from pandas.core.arrays.numpy_ import PandasArray, PandasDtype from pandas.core.arrays.period import PeriodArray, period_array from pandas.core.arrays.sparse import SparseArray @@ -18,9 +20,11 @@ "ExtensionArray", "ExtensionOpsMixin", "ExtensionScalarOpsMixin", + "BaseMaskedArray", "BooleanArray", "Categorical", "DatetimeArray", + "FloatingArray", "IntegerArray", "integer_array", "IntervalArray", diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 0a6a65bbbd5a0..dd750bce7842e 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -58,8 +58,9 @@ class BooleanDtype(BaseMaskedDtype): name = "boolean" + # mypy: https://github.com/python/mypy/issues/4125 @property - def type(self) -> Type[np.bool_]: + def type(self) -> Type: # type: 
ignore[override] return np.bool_ @property @@ -606,10 +607,9 @@ def logical_method(self, other): def _create_comparison_method(cls, op): @ops.unpack_zerodim_and_defer(op.__name__) def cmp_method(self, other): - from pandas.arrays import IntegerArray + from pandas.arrays import FloatingArray, IntegerArray - if isinstance(other, IntegerArray): - # Rely on pandas to unbox and dispatch to us. + if isinstance(other, (IntegerArray, FloatingArray)): return NotImplemented mask = None diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py new file mode 100644 index 0000000000000..c3710196a8611 --- /dev/null +++ b/pandas/core/arrays/floating.py @@ -0,0 +1,618 @@ +import numbers +from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union +import warnings + +import numpy as np + +from pandas._libs import lib, missing as libmissing +from pandas._typing import ArrayLike, DtypeObj +from pandas.compat import set_function_name +from pandas.compat.numpy import function as nv +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.cast import astype_nansafe +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_datetime64_dtype, + is_float, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_object_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import register_extension_dtype +from pandas.core.dtypes.missing import isna + +from pandas.core import nanops, ops +from pandas.core.array_algos import masked_reductions +from pandas.core.ops import invalid_comparison +from pandas.core.ops.common import unpack_zerodim_and_defer +from pandas.core.tools.numeric import to_numeric + +from .masked import BaseMaskedArray, BaseMaskedDtype + +if TYPE_CHECKING: + import pyarrow # noqa: F401 + + +class FloatingDtype(BaseMaskedDtype): + """ + An ExtensionDtype to hold a single size of floating dtype. + + These specific implementations are subclasses of the non-public + FloatingDtype. For example we have Float32Dtype to represent float32. + + The attributes name & type are set when these subclasses are created. + """ + + def __repr__(self) -> str: + return f"{self.name}Dtype()" + + @property + def _is_numeric(self) -> bool: + return True + + @classmethod + def construct_array_type(cls) -> Type["FloatingArray"]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return FloatingArray + + def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]: + # for now only handle other floating types + if not all(isinstance(t, FloatingDtype) for t in dtypes): + return None + np_dtype = np.find_common_type( + [t.numpy_dtype for t in dtypes], [] # type: ignore[union-attr] + ) + if np.issubdtype(np_dtype, np.floating): + return FLOAT_STR_TO_DTYPE[str(np_dtype)] + return None + + def __from_arrow__( + self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"] + ) -> "FloatingArray": + """ + Construct FloatingArray from pyarrow Array/ChunkedArray. 
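+
+        Each chunk of a ``ChunkedArray`` is converted to a numpy data/mask pair
+        and the per-chunk ``FloatingArray`` results are concatenated (see the
+        loop over ``chunks`` below).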
+ """ + import pyarrow # noqa: F811 + + from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask + + pyarrow_type = pyarrow.from_numpy_dtype(self.type) + if not array.type.equals(pyarrow_type): + array = array.cast(pyarrow_type) + + if isinstance(array, pyarrow.Array): + chunks = [array] + else: + # pyarrow.ChunkedArray + chunks = array.chunks + + results = [] + for arr in chunks: + data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=self.type) + float_arr = FloatingArray(data.copy(), ~mask, copy=False) + results.append(float_arr) + + return FloatingArray._concat_same_type(results) + + +def coerce_to_array( + values, dtype=None, mask=None, copy: bool = False +) -> Tuple[np.ndarray, np.ndarray]: + """ + Coerce the input values array to numpy arrays with a mask. + + Parameters + ---------- + values : 1D list-like + dtype : float dtype + mask : bool 1D array, optional + copy : bool, default False + if True, copy the input + + Returns + ------- + tuple of (values, mask) + """ + # if values is floating numpy array, preserve it's dtype + if dtype is None and hasattr(values, "dtype"): + if is_float_dtype(values.dtype): + dtype = values.dtype + + if dtype is not None: + if isinstance(dtype, str) and dtype.startswith("Float"): + # Avoid DeprecationWarning from NumPy about np.dtype("Float64") + # https://github.com/numpy/numpy/pull/7476 + dtype = dtype.lower() + + if not issubclass(type(dtype), FloatingDtype): + try: + dtype = FLOAT_STR_TO_DTYPE[str(np.dtype(dtype))] + except KeyError as err: + raise ValueError(f"invalid dtype specified {dtype}") from err + + if isinstance(values, FloatingArray): + values, mask = values._data, values._mask + if dtype is not None: + values = values.astype(dtype.numpy_dtype, copy=False) + + if copy: + values = values.copy() + mask = mask.copy() + return values, mask + + values = np.array(values, copy=copy) + if is_object_dtype(values): + inferred_type = lib.infer_dtype(values, skipna=True) + if inferred_type == "empty": + values = np.empty(len(values)) + values.fill(np.nan) + elif inferred_type not in [ + "floating", + "integer", + "mixed-integer", + "integer-na", + "mixed-integer-float", + ]: + raise TypeError(f"{values.dtype} cannot be converted to a FloatingDtype") + + elif is_bool_dtype(values) and is_float_dtype(dtype): + values = np.array(values, dtype=float, copy=copy) + + elif not (is_integer_dtype(values) or is_float_dtype(values)): + raise TypeError(f"{values.dtype} cannot be converted to a FloatingDtype") + + if mask is None: + mask = isna(values) + else: + assert len(mask) == len(values) + + if not values.ndim == 1: + raise TypeError("values must be a 1D list-like") + if not mask.ndim == 1: + raise TypeError("mask must be a 1D list-like") + + # infer dtype if needed + if dtype is None: + dtype = np.dtype("float64") + else: + dtype = dtype.type + + # if we are float, let's make sure that we can + # safely cast + + # we copy as need to coerce here + # TODO should this be a safe cast? + if mask.any(): + values = values.copy() + values[mask] = np.nan + values = values.astype(dtype, copy=False) # , casting="safe") + else: + values = values.astype(dtype, copy=False) # , casting="safe") + + return values, mask + + +class FloatingArray(BaseMaskedArray): + """ + Array of floating (optional missing) values. + + .. versionadded:: 1.2.0 + + .. warning:: + + FloatingArray is currently experimental, and its API or internal + implementation may change without warning. 
Especially the behaviour + regarding NaN (distinct from NA missing values) is subject to change. + + We represent a FloatingArray with 2 numpy arrays: + + - data: contains a numpy float array of the appropriate dtype + - mask: a boolean array holding a mask on the data, True is missing + + To construct a FloatingArray from generic array-like input, use + :func:`pandas.array` with one of the float dtypes (see examples). + + See :ref:`integer_na` for more. + + Parameters + ---------- + values : numpy.ndarray + A 1-d float-dtype array. + mask : numpy.ndarray + A 1-d boolean-dtype array indicating missing values. + copy : bool, default False + Whether to copy the `values` and `mask`. + + Attributes + ---------- + None + + Methods + ------- + None + + Returns + ------- + FloatingArray + + Examples + -------- + Create a FloatingArray with :func:`pandas.array`: + + >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype()) + <FloatingArray> + [0.1, <NA>, 0.3] + Length: 3, dtype: Float32 + + String aliases for the dtypes are also available. They are capitalized. + + >>> pd.array([0.1, None, 0.3], dtype="Float32") + <FloatingArray> + [0.1, <NA>, 0.3] + Length: 3, dtype: Float32 + """ + + # The value used to fill '_data' to avoid upcasting + _internal_fill_value = 0.0 + + @cache_readonly + def dtype(self) -> FloatingDtype: + return FLOAT_STR_TO_DTYPE[str(self._data.dtype)] + + def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): + if not (isinstance(values, np.ndarray) and values.dtype.kind == "f"): + raise TypeError( + "values should be floating numpy array. Use " + "the 'pd.array' function instead" + ) + super().__init__(values, mask, copy=copy) + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "FloatingArray": + values, mask = coerce_to_array(scalars, dtype=dtype, copy=copy) + return FloatingArray(values, mask) + + @classmethod + def _from_sequence_of_strings( + cls, strings, dtype=None, copy: bool = False + ) -> "FloatingArray": + scalars = to_numeric(strings, errors="raise") + return cls._from_sequence(scalars, dtype, copy) + + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs): + # For FloatingArray inputs, we apply the ufunc to ._data + # and mask the result. + if method == "reduce": + # Not clear how to handle missing values in reductions. Raise. + raise NotImplementedError("The 'reduce' method is not supported.") + out = kwargs.get("out", ()) + + for x in inputs + out: + if not isinstance(x, self._HANDLED_TYPES + (FloatingArray,)): + return NotImplemented + + # for binary ops, use our custom dunder methods + result = ops.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + mask = np.zeros(len(self), dtype=bool) + inputs2 = [] + for x in inputs: + if isinstance(x, FloatingArray): + mask |= x._mask + inputs2.append(x._data) + else: + inputs2.append(x) + + def reconstruct(x): + # we don't worry about scalar `x` here, since we + # raise for reduce up above.
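+            # float-dtype results keep the propagated mask and are re-wrapped as
+            # FloatingArray below; any other result dtype gets np.nan written
+            # into the masked positions instead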
+ + # TODO + if is_float_dtype(x.dtype): + m = mask.copy() + return FloatingArray(x, m) + else: + x[mask] = np.nan + return x + + result = getattr(ufunc, method)(*inputs2, **kwargs) + if isinstance(result, tuple): + return tuple(reconstruct(x) for x in result) + else: + return reconstruct(result) + + def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]: + return coerce_to_array(value, dtype=self.dtype) + + def astype(self, dtype, copy: bool = True) -> ArrayLike: + """ + Cast to a NumPy array or ExtensionArray with 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + ndarray or ExtensionArray + NumPy ndarray, or BooleanArray, IntegerArray or FloatingArray with + 'dtype' for its dtype. + + Raises + ------ + TypeError + if incompatible type with a FloatingDtype, equivalent of same_kind + casting + """ + from pandas.core.arrays.string_ import StringArray, StringDtype + + dtype = pandas_dtype(dtype) + + # if the dtype is exactly the same, we can fastpath + if self.dtype == dtype: + # return the same object for copy=False + return self.copy() if copy else self + # if we are astyping to another nullable masked dtype, we can fastpath + if isinstance(dtype, BaseMaskedDtype): + # TODO deal with NaNs + data = self._data.astype(dtype.numpy_dtype, copy=copy) + # mask is copied depending on whether the data was copied, and + # not directly depending on the `copy` keyword + mask = self._mask if data is self._data else self._mask.copy() + return dtype.construct_array_type()(data, mask, copy=False) + elif isinstance(dtype, StringDtype): + return StringArray._from_sequence(self, copy=False) + + # coerce + if is_float_dtype(dtype): + # In astype, we consider dtype=float to also mean na_value=np.nan + kwargs = dict(na_value=np.nan) + elif is_datetime64_dtype(dtype): + kwargs = dict(na_value=np.datetime64("NaT")) + else: + kwargs = {} + + data = self.to_numpy(dtype=dtype, **kwargs) + return astype_nansafe(data, dtype, copy=False) + + def _values_for_argsort(self) -> np.ndarray: + return self._data + + @classmethod + def _create_comparison_method(cls, op): + op_name = op.__name__ + + @unpack_zerodim_and_defer(op.__name__) + def cmp_method(self, other): + from pandas.arrays import BooleanArray, IntegerArray + + mask = None + + if isinstance(other, (BooleanArray, IntegerArray, FloatingArray)): + other, mask = other._data, other._mask + + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError( + "can only perform ops with 1-d structures" + ) + + if other is libmissing.NA: + # numpy does not handle pd.NA well as "other" scalar (it returns + # a scalar False instead of an array) + # This may be fixed by NA.__array_ufunc__. Revisit this check + # once that's implemented. + result = np.zeros(self._data.shape, dtype="bool") + mask = np.ones(self._data.shape, dtype="bool") + else: + with warnings.catch_warnings(): + # numpy may show a FutureWarning: + # elementwise comparison failed; returning scalar instead, + # but in the future will perform elementwise comparison + # before returning NotImplemented. We fall back to the correct + # behavior today, so that should be fine to ignore.
+ warnings.filterwarnings("ignore", "elementwise", FutureWarning) + with np.errstate(all="ignore"): + method = getattr(self._data, f"__{op_name}__") + result = method(other) + + if result is NotImplemented: + result = invalid_comparison(self._data, other, op) + + # nans propagate + if mask is None: + mask = self._mask.copy() + else: + mask = self._mask | mask + + return BooleanArray(result, mask) + + name = f"__{op.__name__}__" + return set_function_name(cmp_method, name, cls) + + def _reduce(self, name: str, skipna: bool = True, **kwargs): + data = self._data + mask = self._mask + + if name in {"sum", "prod", "min", "max"}: + op = getattr(masked_reductions, name) + return op(data, mask, skipna=skipna, **kwargs) + + # coerce to a nan-aware float if needed + # (we explicitly use NaN within reductions) + if self._hasna: + data = self.to_numpy("float64", na_value=np.nan) + + op = getattr(nanops, "nan" + name) + result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs) + + if np.isnan(result): + return libmissing.NA + + return result + + def sum(self, skipna=True, min_count=0, **kwargs): + nv.validate_sum((), kwargs) + result = masked_reductions.sum( + values=self._data, mask=self._mask, skipna=skipna, min_count=min_count + ) + return result + + def _maybe_mask_result(self, result, mask, other, op_name: str): + """ + Parameters + ---------- + result : array-like + mask : array-like bool + other : scalar or array-like + op_name : str + """ + # TODO are there cases we don't end up with float? + # if we have a float operand we are by-definition + # a float result + # or our op is a divide + # if (is_float_dtype(other) or is_float(other)) or ( + # op_name in ["rtruediv", "truediv"] + # ): + # result[mask] = np.nan + # return result + + return type(self)(result, mask, copy=False) + + @classmethod + def _create_arithmetic_method(cls, op): + op_name = op.__name__ + + @unpack_zerodim_and_defer(op.__name__) + def floating_arithmetic_method(self, other): + from pandas.arrays import IntegerArray + + omask = None + + if getattr(other, "ndim", 0) > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + + if isinstance(other, (IntegerArray, FloatingArray)): + other, omask = other._data, other._mask + + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError( + "can only perform ops with 1-d structures" + ) + if len(self) != len(other): + raise ValueError("Lengths must match") + if not (is_float_dtype(other) or is_integer_dtype(other)): + raise TypeError("can only perform ops with numeric values") + + else: + if not (is_float(other) or is_integer(other) or other is libmissing.NA): + raise TypeError("can only perform ops with numeric values") + + if omask is None: + mask = self._mask.copy() + if other is libmissing.NA: + mask |= True + else: + mask = self._mask | omask + + if op_name == "pow": + # 1 ** x is 1. + mask = np.where((self._data == 1) & ~self._mask, False, mask) + # x ** 0 is 1. + if omask is not None: + mask = np.where((other == 0) & ~omask, False, mask) + elif other is not libmissing.NA: + mask = np.where(other == 0, False, mask) + + elif op_name == "rpow": + # 1 ** x is 1. + if omask is not None: + mask = np.where((other == 1) & ~omask, False, mask) + elif other is not libmissing.NA: + mask = np.where(other == 1, False, mask) + # x ** 0 is 1. 
+ mask = np.where((self._data == 0) & ~self._mask, False, mask) + + if other is libmissing.NA: + result = np.ones_like(self._data) + else: + with np.errstate(all="ignore"): + result = op(self._data, other) + + # divmod returns a tuple + if op_name == "divmod": + div, mod = result + return ( + self._maybe_mask_result(div, mask, other, "floordiv"), + self._maybe_mask_result(mod, mask, other, "mod"), + ) + + return self._maybe_mask_result(result, mask, other, op_name) + + name = f"__{op.__name__}__" + return set_function_name(floating_arithmetic_method, name, cls) + + +FloatingArray._add_arithmetic_ops() +FloatingArray._add_comparison_ops() + + +_dtype_docstring = """ +An ExtensionDtype for {dtype} data. + +This dtype uses ``pd.NA`` as missing value indicator. + +Attributes +---------- +None + +Methods +------- +None +""" + +# create the Dtype + + +@register_extension_dtype +class Float32Dtype(FloatingDtype): + type = np.float32 + name = "Float32" + __doc__ = _dtype_docstring.format(dtype="float32") + + +@register_extension_dtype +class Float64Dtype(FloatingDtype): + type = np.float64 + name = "Float64" + __doc__ = _dtype_docstring.format(dtype="float64") + + +FLOAT_STR_TO_DTYPE = { + "float32": Float32Dtype(), + "float64": Float64Dtype(), +} diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 8a51b7293082e..04c4c73954671 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -46,10 +46,6 @@ class _IntegerDtype(BaseMaskedDtype): The attributes name & type are set when these subclasses are created. """ - name: str - base = None - type: Type - def __repr__(self) -> str: sign = "U" if self.is_unsigned_integer else "" return f"{sign}Int{8 * self.itemsize}Dtype()" @@ -66,20 +62,6 @@ def is_unsigned_integer(self) -> bool: def _is_numeric(self) -> bool: return True - @cache_readonly - def numpy_dtype(self) -> np.dtype: - """ Return an instance of our numpy dtype """ - return np.dtype(self.type) - - @cache_readonly - def kind(self) -> str: - return self.numpy_dtype.kind - - @cache_readonly - def itemsize(self) -> int: - """ Return the number of bytes in this dtype """ - return self.numpy_dtype.itemsize - @classmethod def construct_array_type(cls) -> Type["IntegerArray"]: """ @@ -106,7 +88,11 @@ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]: [t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], [] ) if np.issubdtype(np_dtype, np.integer): - return STR_TO_DTYPE[str(np_dtype)] + return INT_STR_TO_DTYPE[str(np_dtype)] + elif np.issubdtype(np_dtype, np.floating): + from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE + + return FLOAT_STR_TO_DTYPE[str(np_dtype)] return None def __from_arrow__( @@ -214,7 +200,7 @@ def coerce_to_array( if not issubclass(type(dtype), _IntegerDtype): try: - dtype = STR_TO_DTYPE[str(np.dtype(dtype))] + dtype = INT_STR_TO_DTYPE[str(np.dtype(dtype))] except KeyError as err: raise ValueError(f"invalid dtype specified {dtype}") from err @@ -354,7 +340,7 @@ class IntegerArray(BaseMaskedArray): @cache_readonly def dtype(self) -> _IntegerDtype: - return STR_TO_DTYPE[str(self._data.dtype)] + return INT_STR_TO_DTYPE[str(self._data.dtype)] def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]): @@ -513,11 +499,11 @@ def _create_comparison_method(cls, op): @unpack_zerodim_and_defer(op.__name__) def cmp_method(self, other): - from pandas.arrays import BooleanArray + from 
pandas.core.arrays import BaseMaskedArray, BooleanArray mask = None - if isinstance(other, (BooleanArray, IntegerArray)): + if isinstance(other, BaseMaskedArray): other, mask = other._data, other._mask elif is_list_like(other): @@ -744,7 +730,7 @@ class UInt64Dtype(_IntegerDtype): __doc__ = _dtype_docstring.format(dtype="uint64") -STR_TO_DTYPE: Dict[str, _IntegerDtype] = { +INT_STR_TO_DTYPE: Dict[str, _IntegerDtype] = { "int8": Int8Dtype(), "int16": Int16Dtype(), "int32": Int32Dtype(), diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 31274232e2525..97ade0dc70843 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -5,7 +5,7 @@ from pandas._libs import lib, missing as libmissing from pandas._typing import Scalar from pandas.errors import AbstractMethodError -from pandas.util._decorators import doc +from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.common import ( @@ -34,11 +34,25 @@ class BaseMaskedDtype(ExtensionDtype): Base class for dtypes for BasedMaskedArray subclasses. """ + name: str + base = None + type: Type + na_value = libmissing.NA - @property + @cache_readonly def numpy_dtype(self) -> np.dtype: - raise AbstractMethodError + """ Return an instance of our numpy dtype """ + return np.dtype(self.type) + + @cache_readonly + def kind(self) -> str: + return self.numpy_dtype.kind + + @cache_readonly + def itemsize(self) -> int: + """ Return the number of bytes in this dtype """ + return self.numpy_dtype.itemsize @classmethod def construct_array_type(cls) -> Type["BaseMaskedArray"]: diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 5e7066e32ea39..5f2b378a10b18 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -198,10 +198,20 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): if dtype: assert dtype == "string" - # convert non-na-likes to str, and nan-likes to StringDtype.na_value - result = lib.ensure_string_array( - scalars, na_value=StringDtype.na_value, copy=copy - ) + from pandas.core.arrays.masked import BaseMaskedArray + + if isinstance(scalars, BaseMaskedArray): + # avoid costly conversion to object dtype + na_values = scalars._mask + result = scalars._data + result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + result[na_values] = StringDtype.na_value + + else: + # convert non-na-likes to str, and nan-likes to StringDtype.na_value + result = lib.ensure_string_array( + scalars, na_value=StringDtype.na_value, copy=copy + ) # Manually creating new array avoids the validation step in the __init__, so is # faster. Refactor need for validation? diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 3ec5bc90d521d..4751f6076f869 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -102,6 +102,7 @@ def array( :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` + :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ===================================== @@ -114,6 +115,11 @@ def array( string dtype for string data, and nullable-boolean dtype for boolean data. + .. 
versionchanged:: 1.2.0 + + Pandas now also infers nullable-floating dtype for float-like + input data + copy : bool, default True Whether to copy the data, even if not necessary. Depending on the type of `data`, creating the new array may require @@ -205,6 +211,11 @@ def array( [1, 2, ] Length: 3, dtype: Int64 + >>> pd.array([1.1, 2.2]) + + [1.1, 2.2] + Length: 2, dtype: Float64 + >>> pd.array(["a", None, "c"]) ['a', , 'c'] @@ -231,10 +242,10 @@ def array( If pandas does not infer a dedicated extension type a :class:`arrays.PandasArray` is returned. - >>> pd.array([1.1, 2.2]) + >>> pd.array([1 + 1j, 3 + 2j]) - [1.1, 2.2] - Length: 2, dtype: float64 + [(1+1j), (3+2j)] + Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return @@ -258,6 +269,7 @@ def array( from pandas.core.arrays import ( BooleanArray, DatetimeArray, + FloatingArray, IntegerArray, IntervalArray, PandasArray, @@ -320,6 +332,9 @@ def array( elif inferred_dtype == "integer": return IntegerArray._from_sequence(data, copy=copy) + elif inferred_dtype in ("floating", "mixed-integer-float"): + return FloatingArray._from_sequence(data, copy=copy) + elif inferred_dtype == "boolean": return BooleanArray._from_sequence(data, copy=copy) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c5ea24145ae9e..3aa1317f6db6d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1156,9 +1156,9 @@ def convert_dtypes( target_int_dtype = "Int64" if is_integer_dtype(input_array.dtype): - from pandas.core.arrays.integer import STR_TO_DTYPE + from pandas.core.arrays.integer import INT_STR_TO_DTYPE - inferred_dtype = STR_TO_DTYPE.get( + inferred_dtype = INT_STR_TO_DTYPE.get( input_array.dtype.name, target_int_dtype ) if not is_integer_dtype(input_array.dtype) and is_numeric_dtype( diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index acbdbfd7707e3..14184f044ae95 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -83,7 +83,12 @@ def ensure_float(arr): float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned. 
""" - if issubclass(arr.dtype.type, (np.integer, np.bool_)): + if is_extension_array_dtype(arr.dtype): + if is_float_dtype(arr.dtype): + arr = arr.to_numpy(dtype=arr.dtype.numpy_dtype, na_value=np.nan) + else: + arr = arr.to_numpy(dtype="float64", na_value=np.nan) + elif issubclass(arr.dtype.type, (np.integer, np.bool_)): arr = arr.astype(float) return arr diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 17539cdf451e3..6051aa3022da1 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -20,6 +20,7 @@ from pandas.core.dtypes.cast import maybe_cast_result from pandas.core.dtypes.common import ( + ensure_float, ensure_float64, ensure_int64, ensure_int_or_float, @@ -491,7 +492,7 @@ def _cython_operation( else: values = ensure_int_or_float(values) elif is_numeric and not is_complex_dtype(values): - values = ensure_float64(values) + values = ensure_float64(ensure_float(values)) else: values = values.astype(object) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 54da13c3c620b..541c2988a0636 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -92,6 +92,8 @@ class TestPDApi(Base): "UInt16Dtype", "UInt32Dtype", "UInt64Dtype", + "Float32Dtype", + "Float64Dtype", "NamedAgg", ] diff --git a/pandas/tests/arrays/floating/__init__.py b/pandas/tests/arrays/floating/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/arrays/floating/conftest.py b/pandas/tests/arrays/floating/conftest.py new file mode 100644 index 0000000000000..1e80518e15941 --- /dev/null +++ b/pandas/tests/arrays/floating/conftest.py @@ -0,0 +1,36 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.floating import Float32Dtype, Float64Dtype + + +@pytest.fixture(params=[Float32Dtype, Float64Dtype]) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + return pd.array( + list(np.arange(0.1, 0.9, 0.1)) + + [pd.NA] + + list(np.arange(1, 9.8, 0.1)) + + [pd.NA] + + [9.9, 10.0], + dtype=dtype, + ) + + +@pytest.fixture +def data_missing(dtype): + return pd.array([np.nan, 0.1], dtype=dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture giving 'data' and 'data_missing'""" + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing diff --git a/pandas/tests/arrays/floating/test_arithmetic.py b/pandas/tests/arrays/floating/test_arithmetic.py new file mode 100644 index 0000000000000..7ba4da8a5ede9 --- /dev/null +++ b/pandas/tests/arrays/floating/test_arithmetic.py @@ -0,0 +1,182 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [ + ("add", [1.1, 2.2, None, None, 5.5]), + ("mul", [0.1, 0.4, None, None, 2.5]), + ("sub", [0.9, 1.8, None, None, 4.5]), + ("truediv", [10.0, 10.0, None, None, 10.0]), + ("floordiv", [9.0, 9.0, None, None, 10.0]), + ("mod", [0.1, 0.2, None, None, 0.0]), + ], + ids=["add", "mul", "sub", "div", "floordiv", "mod"], +) +def test_array_op(dtype, opname, exp): + a = pd.array([1.0, 2.0, None, 4.0, 5.0], dtype=dtype) + b = pd.array([0.1, 0.2, 0.3, None, 0.5], dtype=dtype) + + op = getattr(operator, opname) + + result = op(a, 
b) + expected = pd.array(exp, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(dtype, zero, negative): + # TODO pending NA/NaN discussion + # https://github.com/pandas-dev/pandas/issues/32265/ + a = pd.array([0, 1, -1, None], dtype=dtype) + result = a / zero + expected = FloatingArray( + np.array([np.nan, np.inf, -np.inf, np.nan], dtype=dtype.numpy_dtype), + np.array([False, False, False, True]), + ) + if negative: + expected *= -1 + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(dtype): + a = pd.array([-1, 0, 1, None, 2], dtype=dtype) + result = a ** 0 + expected = pd.array([1, 1, 1, 1, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a ** 1 + expected = pd.array([-1, 0, 1, None, 2], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a ** pd.NA + expected = pd.array([None, None, 1, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a ** np.nan + # TODO np.nan should be converted to pd.NA / missing before operation? + expected = FloatingArray( + np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype=dtype.numpy_dtype), + mask=a._mask, + ) + tm.assert_extension_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. + + result = 0 ** a + expected = pd.array([1, 0, None, 0], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = 1 ** a + expected = pd.array([1, 1, 1, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = pd.NA ** a + expected = pd.array([1, None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = np.nan ** a + expected = FloatingArray( + np.array([1, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=a._mask + ) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_array(dtype): + a = pd.array([0, 0, 0, 1, 1, 1, None, None, None], dtype=dtype) + b = pd.array([0, 1, None, 0, 1, None, 0, 1, None], dtype=dtype) + result = a ** b + expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = pd.array([np.nan, np.nan], dtype="Float64") + result = np.array([1.0, 2.0]) ** arr + expected = pd.array([1.0, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("other", [0, 0.5]) +def test_arith_zero_dim_ndarray(other): + arr = pd.array([1, None, 2], dtype="Float64") + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators): + + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + # invalid scalars + msg = ( + r"(:?can only perform ops with numeric values)" + r"|(:?FloatingArray cannot perform the operation mod)" + ) + with pytest.raises(TypeError, match=msg): + ops("foo") + with pytest.raises(TypeError, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + with pytest.raises(TypeError, match=msg): + ops(pd.Series("foo", 
index=s.index)) + + if op != "__rpow__": + # TODO(extension) + # rpow with a datetimelike coerces the integer array incorrectly + msg = ( + "can only perform ops with numeric values|" + "cannot perform .* with this index type: DatetimeArray|" + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. *" + ) + with pytest.raises(TypeError, match=msg): + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) + + +# Various +# ----------------------------------------------------------------------------- + + +def test_cross_type_arithmetic(): + + df = pd.DataFrame( + { + "A": pd.array([1, 2, np.nan], dtype="Float64"), + "B": pd.array([1, np.nan, 3], dtype="Float32"), + "C": np.array([1, 2, 3], dtype="float64"), + } + ) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype="Float64") + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, None], dtype="boolean") + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype="Float64") + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arrays/floating/test_astype.py b/pandas/tests/arrays/floating/test_astype.py new file mode 100644 index 0000000000000..828d80d2f9a51 --- /dev/null +++ b/pandas/tests/arrays/floating/test_astype.py @@ -0,0 +1,120 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_astype(): + # with missing values + arr = pd.array([0.1, 0.2, None], dtype="Float64") + + with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype NumPy"): + arr.astype("int64") + + with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype NumPy"): + arr.astype("bool") + + result = arr.astype("float64") + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + # no missing values + arr = pd.array([0.0, 1.0, 0.5], dtype="Float64") + result = arr.astype("int64") + expected = np.array([0, 1, 0], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("bool") + expected = np.array([False, True, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_to_floating_array(): + # astype to FloatingArray + arr = pd.array([0.0, 1.0, None], dtype="Float64") + + result = arr.astype("Float64") + tm.assert_extension_array_equal(result, arr) + result = arr.astype(pd.Float64Dtype()) + tm.assert_extension_array_equal(result, arr) + result = arr.astype("Float32") + expected = pd.array([0.0, 1.0, None], dtype="Float32") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_to_boolean_array(): + # astype to BooleanArray + arr = pd.array([0.0, 1.0, None], dtype="Float64") + + result = arr.astype("boolean") + expected = pd.array([False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = arr.astype(pd.BooleanDtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_astype_to_integer_array(): + # astype to IntegerArray + arr = pd.array([0.0, 1.5, None], dtype="Float64") + + result = arr.astype("Int64") + expected = pd.array([0, 1, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_str(): + a = pd.array([0.1, 0.2, None], dtype="Float64") + expected = np.array(["0.1", "0.2", ""], dtype=object) + + tm.assert_numpy_array_equal(a.astype(str), expected) + 
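+    # the "str" alias should give the same result as the builtin str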
tm.assert_numpy_array_equal(a.astype("str"), expected) + + +def test_astype_copy(): + arr = pd.array([0.1, 0.2, None], dtype="Float64") + orig = pd.array([0.1, 0.2, None], dtype="Float64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Float64", copy=True) + assert result is not arr + assert not np.shares_memory(result._data, arr._data) + assert not np.shares_memory(result._mask, arr._mask) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Float64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([0.1, 0.2, None], dtype="Float64") + orig = pd.array([0.1, 0.2, None], dtype="Float64") + + result = arr.astype("Float32", copy=False) + assert not np.shares_memory(result._data, arr._data) + assert not np.shares_memory(result._mask, arr._mask) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + +def test_astype_object(dtype): + arr = pd.array([1.0, pd.NA], dtype=dtype) + + result = arr.astype(object) + expected = np.array([1.0, pd.NA], dtype=object) + tm.assert_numpy_array_equal(result, expected) + # check exact element types + assert isinstance(result[0], float) + assert result[1] is pd.NA diff --git a/pandas/tests/arrays/floating/test_comparison.py b/pandas/tests/arrays/floating/test_comparison.py new file mode 100644 index 0000000000000..5538367f49e5b --- /dev/null +++ b/pandas/tests/arrays/floating/test_comparison.py @@ -0,0 +1,117 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension.base import BaseOpsUtil + + +class TestComparisonOps(BaseOpsUtil): + def _compare_other(self, data, op_name, other): + op = self.get_op_from_name(op_name) + + # array + result = pd.Series(op(data, other)) + expected = pd.Series(op(data._data, other), dtype="boolean") + + # fill the nan locations + expected[data._mask] = pd.NA + + tm.assert_series_equal(result, expected) + + # series + s = pd.Series(data) + result = op(s, other) + + expected = op(pd.Series(data._data), other) + + # fill the nan locations + expected[data._mask] = pd.NA + expected = expected.astype("boolean") + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("other", [True, False, pd.NA, -1.0, 0.0, 1]) + def test_scalar(self, other, all_compare_operators): + op = self.get_op_from_name(all_compare_operators) + a = pd.array([1.0, 0.0, None], dtype="Float64") + + result = op(a, other) + + if other is pd.NA: + expected = pd.array([None, None, None], dtype="boolean") + else: + values = op(a._data, other) + expected = pd.arrays.BooleanArray(values, a._mask, copy=True) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal(a, pd.array([1.0, 0.0, None], dtype="Float64")) + + def test_array(self, all_compare_operators): + op = self.get_op_from_name(all_compare_operators) + a = pd.array([0, 1, 2, None, None, None], dtype="Float64") + b = pd.array([0, 1, None, 0, 1, None], dtype="Float64") + + result = op(a, b) + values 
= op(a._data, b._data) + mask = a._mask | b._mask + + expected = pd.arrays.BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal( + a, pd.array([0, 1, 2, None, None, None], dtype="Float64") + ) + tm.assert_extension_array_equal( + b, pd.array([0, 1, None, 0, 1, None], dtype="Float64") + ) + + def test_compare_with_booleanarray(self, all_compare_operators): + op = self.get_op_from_name(all_compare_operators) + a = pd.array([True, False, None] * 3, dtype="boolean") + b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64") + other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") + expected = op(a, other) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + expected = op(other, a) + result = op(b, a) + tm.assert_extension_array_equal(result, expected) + + def test_compare_with_integerarray(self, all_compare_operators): + op = self.get_op_from_name(all_compare_operators) + a = pd.array([0, 1, None] * 3, dtype="Int64") + b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64") + other = b.astype("Int64") + expected = op(a, other) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + expected = op(other, a) + result = op(b, a) + tm.assert_extension_array_equal(result, expected) + + def test_no_shared_mask(self, data): + result = data + 1 + assert np.shares_memory(result._mask, data._mask) is False + + def test_compare_to_string(self, dtype): + # GH 28930 + s = pd.Series([1, None], dtype=dtype) + result = s == "a" + expected = pd.Series([False, pd.NA], dtype="boolean") + + self.assert_series_equal(result, expected) + + +def test_equals(): + # GH-30652 + # equals is generally tested in /tests/extension/base/methods, but this + # specifically tests that two arrays of the same class but different dtype + # do not evaluate equal + a1 = pd.array([1, 2, None], dtype="Float64") + a2 = pd.array([1, 2, None], dtype="Float32") + assert a1.equals(a2) is False diff --git a/pandas/tests/arrays/floating/test_concat.py b/pandas/tests/arrays/floating/test_concat.py new file mode 100644 index 0000000000000..dcb021045c6a7 --- /dev/null +++ b/pandas/tests/arrays/floating/test_concat.py @@ -0,0 +1,21 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Float64", "Float64"], "Float64"), + (["Float32", "Float64"], "Float64"), + (["Float32", "Float32"], "Float32"), + ], +) +def test_concat_series(to_concat_dtypes, result_dtype): + + result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arrays/floating/test_construction.py b/pandas/tests/arrays/floating/test_construction.py new file mode 100644 index 0000000000000..69147f8f3a54a --- /dev/null +++ b/pandas/tests/arrays/floating/test_construction.py @@ -0,0 +1,167 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray +from pandas.core.arrays.floating import Float32Dtype, Float64Dtype + + +def test_uses_pandas_na(): + a = pd.array([1, None], dtype=pd.Float64Dtype()) + assert a[1] is pd.NA + + +def test_floating_array_constructor(): + values = np.array([1, 2, 3, 4], dtype="float64") + mask = np.array([False, False, 
False, True], dtype="bool") + + result = FloatingArray(values, mask) + expected = pd.array([1, 2, 3, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + tm.assert_numpy_array_equal(result._data, values) + tm.assert_numpy_array_equal(result._mask, mask) + + msg = r".* should be .* numpy array. Use the 'pd.array' function instead" + with pytest.raises(TypeError, match=msg): + FloatingArray(values.tolist(), mask) + + with pytest.raises(TypeError, match=msg): + FloatingArray(values, mask.tolist()) + + with pytest.raises(TypeError, match=msg): + FloatingArray(values.astype(int), mask) + + msg = r"__init__\(\) missing 1 required positional argument: 'mask'" + with pytest.raises(TypeError, match=msg): + FloatingArray(values) + + +def test_floating_array_constructor_copy(): + values = np.array([1, 2, 3, 4], dtype="float64") + mask = np.array([False, False, False, True], dtype="bool") + + result = FloatingArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = FloatingArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +def test_to_array(): + result = pd.array([0.1, 0.2, 0.3, 0.4]) + expected = pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + ([1, None], [1, pd.NA]), + ([None], [pd.NA]), + ([None, np.nan], [pd.NA, pd.NA]), + ([1, np.nan], [1, pd.NA]), + ([np.nan], [pd.NA]), + ], +) +def test_to_array_none_is_nan(a, b): + result = pd.array(a, dtype="Float64") + expected = pd.array(b, dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +def test_to_array_mixed_integer_float(): + result = pd.array([1, 2.0]) + expected = pd.array([1.0, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = pd.array([1, None, 2.0]) + expected = pd.array([1.0, None, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + ["1", "2"], + "foo", + 1, + 1.0, + pd.date_range("20130101", periods=2), + np.array(["foo"]), + [[1, 2], [3, 4]], + [np.nan, {"a": 1}], + ], +) +def test_to_array_error(values): + # error in converting existing arrays to FloatingArray + msg = ( + r"(:?.* cannot be converted to a FloatingDtype)" + r"|(:?values must be a 1D list-like)" + r"|(:?Cannot pass scalar)" + ) + with pytest.raises((TypeError, ValueError), match=msg): + pd.array(values, dtype="Float64") + + +def test_to_array_inferred_dtype(): + # if values has dtype -> respect it + result = pd.array(np.array([1, 2], dtype="float32")) + assert result.dtype == Float32Dtype() + + # if values have no dtype -> always float64 + result = pd.array([1.0, 2.0]) + assert result.dtype == Float64Dtype() + + +def test_to_array_dtype_keyword(): + result = pd.array([1, 2], dtype="Float32") + assert result.dtype == Float32Dtype() + + # if values has dtype -> override it + result = pd.array(np.array([1, 2], dtype="float32"), dtype="Float64") + assert result.dtype == Float64Dtype() + + +def test_to_array_integer(): + result = pd.array([1, 2], dtype="Float64") + expected = pd.array([1.0, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # for integer dtypes, the itemsize is not preserved + # TODO can we specify "floating" in general? 
+ result = pd.array(np.array([1, 2], dtype="int32"), dtype="Float64") + assert result.dtype == Float64Dtype() + + +@pytest.mark.parametrize( + "bool_values, values, target_dtype, expected_dtype", + [ + ([False, True], [0, 1], Float64Dtype(), Float64Dtype()), + ([False, True], [0, 1], "Float64", Float64Dtype()), + ([False, True, np.nan], [0, 1, np.nan], Float64Dtype(), Float64Dtype()), + ], +) +def test_to_array_bool(bool_values, values, target_dtype, expected_dtype): + result = pd.array(bool_values, dtype=target_dtype) + assert result.dtype == expected_dtype + expected = pd.array(values, dtype=target_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_series_from_float(data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arrays/floating/test_function.py b/pandas/tests/arrays/floating/test_function.py new file mode 100644 index 0000000000000..84c650f880541 --- /dev/null +++ b/pandas/tests/arrays/floating/test_function.py @@ -0,0 +1,154 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("ufunc", [np.abs, np.sign]) +# np.sign emits a warning with nans, +@pytest.mark.filterwarnings("ignore:invalid value encountered in sign") +def test_ufuncs_single(ufunc): + a = pd.array([1, 2, -3, np.nan], dtype="Float64") + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt]) +def test_ufuncs_single_float(ufunc): + a = pd.array([1.0, 0.2, 3.0, np.nan], dtype="Float64") + with np.errstate(invalid="ignore"): + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + with np.errstate(invalid="ignore"): + result = ufunc(s) + expected = pd.Series(ufunc(s.astype(float)), dtype="Float64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.add, np.subtract]) +def test_ufuncs_binary_float(ufunc): + # two FloatingArrays + a = pd.array([1, 0.2, -3, np.nan], dtype="Float64") + result = ufunc(a, a) + expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # FloatingArray with numpy array + arr = np.array([1, 2, 3, 4]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a.astype(float), arr), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # FloatingArray with scalar + result = ufunc(a, 1) + expected = pd.array(ufunc(a.astype(float), 1), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(1, a) + expected = pd.array(ufunc(1, a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("values", [[0, 1], [0, None]]) +def 
test_ufunc_reduce_raises(values): + a = pd.array(values, dtype="Float64") + msg = r"The 'reduce' method is not supported." + with pytest.raises(NotImplementedError, match=msg): + np.add.reduce(a) + + +@pytest.mark.parametrize( + "pandasmethname, kwargs", + [ + ("var", {"ddof": 0}), + ("var", {"ddof": 1}), + ("kurtosis", {}), + ("skew", {}), + ("sem", {}), + ], +) +def test_stat_method(pandasmethname, kwargs): + s = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, np.nan, np.nan], dtype="Float64") + pandasmeth = getattr(s, pandasmethname) + result = pandasmeth(**kwargs) + s2 = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64") + pandasmeth = getattr(s2, pandasmethname) + expected = pandasmeth(**kwargs) + assert expected == result + + +def test_value_counts_na(): + arr = pd.array([0.1, 0.2, 0.1, pd.NA], dtype="Float64") + result = arr.value_counts(dropna=False) + expected = pd.Series([2, 1, 1], index=[0.1, 0.2, pd.NA], dtype="Int64") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=[0.1, 0.2], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_value_counts_empty(): + s = pd.Series([], dtype="Float64") + result = s.value_counts() + idx = pd.Index([], dtype="object") + expected = pd.Series([], index=idx, dtype="Int64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 4]) +def test_floating_array_sum(skipna, min_count): + arr = pd.array([1, 2, 3, None], dtype="Float64") + result = arr.sum(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 6.0 + else: + assert result is pd.NA + + +@pytest.mark.parametrize( + "values, expected", [([1, 2, 3], 6.0), ([1, 2, 3, None], 6.0), ([None], 0.0)] +) +def test_floating_array_numpy_sum(values, expected): + arr = pd.array(values, dtype="Float64") + result = np.sum(arr) + assert result == expected + + +@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"]) +def test_preserve_dtypes(op): + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([0.1, None, 3.0], dtype="Float64"), + } + ) + + # op + result = getattr(df.C, op)() + assert isinstance(result, np.float64) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([0.1, 3], dtype="Float64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/arrays/floating/test_repr.py b/pandas/tests/arrays/floating/test_repr.py new file mode 100644 index 0000000000000..8767b79242c83 --- /dev/null +++ b/pandas/tests/arrays/floating/test_repr.py @@ -0,0 +1,45 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.floating import Float32Dtype, Float64Dtype + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + np.dtype(dtype.type).kind == "f" + assert dtype.name is not None + + +@pytest.mark.parametrize( + "dtype, expected", + [(Float32Dtype(), "Float32Dtype()"), (Float64Dtype(), "Float64Dtype()")], +) +def test_repr_dtype(dtype, expected): + assert repr(dtype) == expected + + +def test_repr_array(): + result = repr(pd.array([1.0, None, 3.0])) + expected = "\n[1.0, , 3.0]\nLength: 3, dtype: Float64" + assert result == expected + + +def test_repr_array_long(): + data = pd.array([1.0, 2.0, None] * 1000) + expected = """ +[ 1.0, 2.0, , 1.0, 2.0, , 1.0, 2.0, , 1.0, + ... 
+ <NA>,  1.0,  2.0, <NA>,  1.0,  2.0, <NA>,  1.0,  2.0, <NA>]
+Length: 3000, dtype: Float64"""
+    result = repr(data)
+    assert result == expected
+
+
+def test_frame_repr(data_missing):
+
+    df = pd.DataFrame({"A": data_missing})
+    result = repr(df)
+    expected = "      A\n0  <NA>\n1   0.1"
+    assert result == expected
diff --git a/pandas/tests/arrays/floating/test_to_numpy.py b/pandas/tests/arrays/floating/test_to_numpy.py
new file mode 100644
index 0000000000000..26e5687b1b4a0
--- /dev/null
+++ b/pandas/tests/arrays/floating/test_to_numpy.py
@@ -0,0 +1,132 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core.arrays import FloatingArray
+
+
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy(box):
+    con = pd.Series if box else pd.array
+
+    # default (with or without missing values) -> object dtype
+    arr = con([0.1, 0.2, 0.3], dtype="Float64")
+    result = arr.to_numpy()
+    expected = np.array([0.1, 0.2, 0.3], dtype="object")
+    tm.assert_numpy_array_equal(result, expected)
+
+    arr = con([0.1, 0.2, None], dtype="Float64")
+    result = arr.to_numpy()
+    expected = np.array([0.1, 0.2, pd.NA], dtype="object")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy_float(box):
+    con = pd.Series if box else pd.array
+
+    # no missing values -> can convert to float, otherwise raises
+    arr = con([0.1, 0.2, 0.3], dtype="Float64")
+    result = arr.to_numpy(dtype="float64")
+    expected = np.array([0.1, 0.2, 0.3], dtype="float64")
+    tm.assert_numpy_array_equal(result, expected)
+
+    arr = con([0.1, 0.2, None], dtype="Float64")
+    with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
+        result = arr.to_numpy(dtype="float64")
+
+    # need to explicitly specify na_value
+    result = arr.to_numpy(dtype="float64", na_value=np.nan)
+    expected = np.array([0.1, 0.2, np.nan], dtype="float64")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy_int(box):
+    con = pd.Series if box else pd.array
+
+    # no missing values -> can convert to int, otherwise raises
+    arr = con([1.0, 2.0, 3.0], dtype="Float64")
+    result = arr.to_numpy(dtype="int64")
+    expected = np.array([1, 2, 3], dtype="int64")
+    tm.assert_numpy_array_equal(result, expected)
+
+    arr = con([1.0, 2.0, None], dtype="Float64")
+    with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
+        result = arr.to_numpy(dtype="int64")
+
+    # automatic casting (floors the values)
+    arr = con([0.1, 0.9, 1.1], dtype="Float64")
+    result = arr.to_numpy(dtype="int64")
+    expected = np.array([0, 0, 1], dtype="int64")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy_na_value(box):
+    con = pd.Series if box else pd.array
+
+    arr = con([0.0, 1.0, None], dtype="Float64")
+    result = arr.to_numpy(dtype=object, na_value=None)
+    expected = np.array([0.0, 1.0, None], dtype="object")
+    tm.assert_numpy_array_equal(result, expected)
+
+    result = arr.to_numpy(dtype=bool, na_value=False)
+    expected = np.array([False, True, False], dtype="bool")
+    tm.assert_numpy_array_equal(result, expected)
+
+    result = arr.to_numpy(dtype="int64", na_value=-99)
+    expected = np.array([0, 1, -99], dtype="int64")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+def test_to_numpy_na_value_with_nan():
+    # array with both NaN and NA -> only fill NA with `na_value`
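+    # (position 1 holds a real NaN in the data, position 2 is masked as NA)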
+    arr = FloatingArray(np.array([0.0, np.nan, 0.0]), np.array([False, False, True]))
+    result = arr.to_numpy(dtype="float64", na_value=-1)
+    expected = np.array([0.0, np.nan, -1.0], dtype="float64")
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"])
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy_dtype(box, dtype):
+    con = pd.Series if box else pd.array
+    arr = con([0.0, 1.0], dtype="Float64")
+
+    result = arr.to_numpy(dtype=dtype)
+    expected = np.array([0, 1], dtype=dtype)
+    tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"])
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy_na_raises(box, dtype):
+    con = pd.Series if box else pd.array
+    arr = con([0.0, 1.0, None], dtype="Float64")
+    with pytest.raises(ValueError, match=dtype):
+        arr.to_numpy(dtype=dtype)
+
+
+@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
+def test_to_numpy_string(box, dtype):
+    con = pd.Series if box else pd.array
+    arr = con([0.0, 1.0, None], dtype="Float64")
+
+    result = arr.to_numpy(dtype="str")
+    expected = np.array([0.0, 1.0, pd.NA], dtype="