@@ -21,10 +21,12 @@
 from pandas.errors import AbstractMethodError
 from pandas.util._decorators import Appender, Substitution

-from pandas.core.dtypes.cast import maybe_downcast_to_dtype
+from pandas.core.dtypes.cast import (
+    maybe_convert_objects, maybe_downcast_to_dtype)
 from pandas.core.dtypes.common import (
     ensure_int64, ensure_platform_int, is_bool, is_datetimelike,
-    is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar)
+    is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_object_dtype,
+    is_scalar)
 from pandas.core.dtypes.missing import isna, notna

 from pandas._typing import FrameOrSeries
@@ -334,7 +336,6 @@ def _decide_output_index(self, output, labels):

     def _wrap_applied_output(self, keys, values, not_indexed_same=False):
         from pandas.core.index import _all_indexes_same
-        from pandas.core.tools.numeric import to_numeric

         if len(keys) == 0:
             return DataFrame(index=keys)
@@ -406,7 +407,6 @@ def first_not_none(values):
             # provide a reduction (Frame -> Series) if groups are
             # unique
             if self.squeeze:
-
                 # assign the name to this series
                 if singular_series:
                     values[0].name = keys[0]
@@ -481,14 +481,7 @@ def first_not_none(values):
                 # as we are stacking can easily have object dtypes here
                 so = self._selected_obj
                 if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any():
-                    result = result.apply(
-                        lambda x: to_numeric(x, errors='ignore'))
-                    date_cols = self._selected_obj.select_dtypes(
-                        include=['datetime', 'timedelta']).columns
-                    date_cols = date_cols.intersection(result.columns)
-                    result[date_cols] = (result[date_cols]
-                                         ._convert(datetime=True,
-                                                   coerce=True))
+                    result = _recast_datetimelike_result(result)
                 else:
                     result = result._convert(datetime=True)

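For context, a minimal sketch of the path this hunk touches (an assumed repro shape, not the exact case from GH#26285): when `apply` returns per-group frames that get stacked back together, a datetime column can come back as object dtype, and the branch above now re-infers it through the new helper instead of round-tripping through `to_numeric(errors='ignore')`.

```python
import pandas as pd

df = pd.DataFrame({
    "key": ["a", "a", "b"],
    "when": pd.to_datetime(["2019-01-01", "2019-01-02", "2019-01-03"]),
})

# Stacking the per-group results goes through _wrap_applied_output; the
# is_datetimelike check above decides whether the stacked frame needs its
# object-dtype columns coerced back to datetime64.
out = df.groupby("key").apply(lambda g: g.iloc[:1])
print(out.dtypes["when"])  # datetime64[ns], not object
```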
@@ -1710,3 +1703,35 @@ def _normalize_keyword_aggregation(kwargs):
         order.append((column,
                       com.get_callable_name(aggfunc) or aggfunc))
     return aggspec, columns, order
+
+
+def _recast_datetimelike_result(result: DataFrame) -> DataFrame:
+    """
+    If we have date/time like in the original, then coerce dates
+    as we are stacking can easily have object dtypes here.
+
+    Parameters
+    ----------
+    result : DataFrame
+
+    Returns
+    -------
+    DataFrame
+
+    Notes
+    -----
+    - Assumes Groupby._selected_obj has ndim==2 and at least one
+      datetimelike column
+    """
+    result = result.copy()
+
+    obj_cols = [idx for idx in range(len(result.columns))
+                if is_object_dtype(result.dtypes[idx])]
+
+    # See GH#26285
+    for n in obj_cols:
+        converted = maybe_convert_objects(result.iloc[:, n].values,
+                                          convert_numeric=False)
+
+        result.iloc[:, n] = converted
+    return result
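A rough illustration of what the new helper does, assuming `_recast_datetimelike_result` is in scope (it is a private function, so this is for exposition only; the frame below is made up): object-dtype columns are re-inferred one by one, and `convert_numeric=False` keeps numeric-looking object columns from being converted along the way.

```python
import numpy as np
import pandas as pd

# Object-dtype columns as they might come out of stacking: one holds real
# Timestamps, the other holds numeric-looking strings.
raw = pd.DataFrame({
    "when": np.array([pd.Timestamp("2019-01-01"),
                      pd.Timestamp("2019-01-02")], dtype=object),
    "id": np.array(["7", "8"], dtype=object),
})

out = _recast_datetimelike_result(raw)
print(out.dtypes["when"])  # datetime64[ns]: the Timestamps were re-inferred
print(out.dtypes["id"])    # object: numeric conversion is disabled
```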