diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 6aa93d9780913..cc0174f795ebe 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -502,7 +502,7 @@ cdef class DatetimeEngine(Int64Engine): if scalar is NaT: return NaT.value elif isinstance(scalar, _Timestamp): - if scalar._reso == self.reso: + if scalar._creso == self.reso: return scalar.value else: # Note: caller is responsible for catching potential ValueError @@ -570,7 +570,7 @@ cdef class TimedeltaEngine(DatetimeEngine): if scalar is NaT: return NaT.value elif isinstance(scalar, _Timedelta): - if scalar._reso == self.reso: + if scalar._creso == self.reso: return scalar.value else: # Note: caller is responsible for catching potential ValueError diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index 0390aad23d83a..9317a371cc344 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -109,7 +109,7 @@ def to_offset(freq: _BaseOffsetT) -> _BaseOffsetT: ... def to_offset(freq: timedelta | str) -> BaseOffset: ... class Tick(SingleConstructorOffset): - _reso: int + _creso: int _prefix: str _td64_unit: str def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 68577113702eb..8bdd3d6ac259e 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1146,7 +1146,7 @@ cdef class Day(Tick): _prefix = "D" _td64_unit = "D" _period_dtype_code = PeriodDtypeCode.D - _reso = NPY_DATETIMEUNIT.NPY_FR_D + _creso = NPY_DATETIMEUNIT.NPY_FR_D cdef class Hour(Tick): @@ -1154,7 +1154,7 @@ cdef class Hour(Tick): _prefix = "H" _td64_unit = "h" _period_dtype_code = PeriodDtypeCode.H - _reso = NPY_DATETIMEUNIT.NPY_FR_h + _creso = NPY_DATETIMEUNIT.NPY_FR_h cdef class Minute(Tick): @@ -1162,7 +1162,7 @@ cdef class Minute(Tick): _prefix = "T" _td64_unit = "m" _period_dtype_code = PeriodDtypeCode.T - _reso = NPY_DATETIMEUNIT.NPY_FR_m + _creso = NPY_DATETIMEUNIT.NPY_FR_m cdef class Second(Tick): @@ -1170,7 +1170,7 @@ cdef class Second(Tick): _prefix = "S" _td64_unit = "s" _period_dtype_code = PeriodDtypeCode.S - _reso = NPY_DATETIMEUNIT.NPY_FR_s + _creso = NPY_DATETIMEUNIT.NPY_FR_s cdef class Milli(Tick): @@ -1178,7 +1178,7 @@ cdef class Milli(Tick): _prefix = "L" _td64_unit = "ms" _period_dtype_code = PeriodDtypeCode.L - _reso = NPY_DATETIMEUNIT.NPY_FR_ms + _creso = NPY_DATETIMEUNIT.NPY_FR_ms cdef class Micro(Tick): @@ -1186,7 +1186,7 @@ cdef class Micro(Tick): _prefix = "U" _td64_unit = "us" _period_dtype_code = PeriodDtypeCode.U - _reso = NPY_DATETIMEUNIT.NPY_FR_us + _creso = NPY_DATETIMEUNIT.NPY_FR_us cdef class Nano(Tick): @@ -1194,7 +1194,7 @@ cdef class Nano(Tick): _prefix = "N" _td64_unit = "ns" _period_dtype_code = PeriodDtypeCode.N - _reso = NPY_DATETIMEUNIT.NPY_FR_ns + _creso = NPY_DATETIMEUNIT.NPY_FR_ns def delta_to_tick(delta: timedelta) -> Tick: @@ -3394,7 +3394,7 @@ cdef class FY5253Quarter(FY5253Mixin): for qlen in qtr_lens: if qlen * 7 <= tdelta.days: num_qtrs += 1 - tdelta -= (<_Timedelta>Timedelta(days=qlen * 7))._as_creso(norm._reso) + tdelta -= (<_Timedelta>Timedelta(days=qlen * 7))._as_creso(norm._creso) else: break else: diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 55a3741b9cdff..774fd7f20fed6 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1693,7 +1693,7 @@ cdef class _Period(PeriodMixin): return NaT try: - inc = delta_to_nanoseconds(other, 
reso=self.freq._reso, round_ok=False) + inc = delta_to_nanoseconds(other, reso=self.freq._creso, round_ok=False) except ValueError as err: raise IncompatibleFrequency("Input cannot be converted to " f"Period(freq={self.freqstr})") from err diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index feec08840cb98..921b30b4f91dc 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -18,7 +18,7 @@ cdef class _Timedelta(timedelta): int64_t value # nanoseconds bint _is_populated # are my components populated int64_t _d, _h, _m, _s, _ms, _us, _ns - NPY_DATETIMEUNIT _reso + NPY_DATETIMEUNIT _creso cpdef timedelta to_pytimedelta(_Timedelta self) cdef bint _has_ns(self) diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi index 8babcba747b0c..b40b08eb601a4 100644 --- a/pandas/_libs/tslibs/timedeltas.pyi +++ b/pandas/_libs/tslibs/timedeltas.pyi @@ -78,7 +78,7 @@ def delta_to_nanoseconds( ) -> int: ... class Timedelta(timedelta): - _reso: int + _creso: int min: ClassVar[Timedelta] max: ClassVar[Timedelta] resolution: ClassVar[Timedelta] diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 2beb3470318b5..3aaf321f301cb 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -240,11 +240,11 @@ cpdef int64_t delta_to_nanoseconds( if is_tick_object(delta): n = delta.n - in_reso = delta._reso + in_reso = delta._creso elif isinstance(delta, _Timedelta): n = delta.value - in_reso = delta._reso + in_reso = delta._creso elif is_timedelta64_object(delta): in_reso = get_datetime64_unit(delta) @@ -339,7 +339,7 @@ cdef convert_to_timedelta64(object ts, str unit): return np.timedelta64(NPY_NAT, "ns") elif isinstance(ts, _Timedelta): # already in the proper format - if ts._reso != NPY_FR_ns: + if ts._creso != NPY_FR_ns: ts = ts._as_unit("ns").asm8 else: ts = np.timedelta64(ts.value, "ns") @@ -740,7 +740,7 @@ cdef bint _validate_ops_compat(other): def _op_unary_method(func, name): def f(self): new_value = func(self.value) - return _timedelta_from_value_and_reso(new_value, self._reso) + return _timedelta_from_value_and_reso(new_value, self._creso) f.__name__ = name return f @@ -792,10 +792,10 @@ def _binary_op_method_timedeltalike(op, name): # Matching numpy, we cast to the higher resolution. Unlike numpy, # we raise instead of silently overflowing during this casting. - if self._reso < other._reso: - self = (<_Timedelta>self)._as_creso(other._reso, round_ok=True) - elif self._reso > other._reso: - other = (<_Timedelta>other)._as_creso(self._reso, round_ok=True) + if self._creso < other._creso: + self = (<_Timedelta>self)._as_creso(other._creso, round_ok=True) + elif self._creso > other._creso: + other = (<_Timedelta>other)._as_creso(self._creso, round_ok=True) res = op(self.value, other.value) if res == NPY_NAT: @@ -803,7 +803,7 @@ def _binary_op_method_timedeltalike(op, name): # TODO: more generally could do an overflowcheck in op? return NaT - return _timedelta_from_value_and_reso(res, reso=self._reso) + return _timedelta_from_value_and_reso(res, reso=self._creso) f.__name__ = name return f @@ -970,7 +970,7 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso): td_base.value = value td_base._is_populated = 0 - td_base._reso = reso + td_base._creso = reso return td_base @@ -996,7 +996,7 @@ class MinMaxReso: # i.e. 
this is on the class, default to nanos return Timedelta(val) else: - return Timedelta._from_value_and_reso(val, obj._reso) + return Timedelta._from_value_and_reso(val, obj._creso) def __set__(self, obj, value): raise AttributeError(f"{self._name} is not settable.") @@ -1022,9 +1022,9 @@ cdef class _Timedelta(timedelta): @property def _unit(self) -> str: """ - The abbreviation associated with self._reso. + The abbreviation associated with self._creso. """ - return npy_unit_to_abbrev(self._reso) + return npy_unit_to_abbrev(self._creso) @property def days(self) -> int: # TODO(cython3): make cdef property @@ -1127,7 +1127,7 @@ cdef class _Timedelta(timedelta): else: return NotImplemented - if self._reso == ots._reso: + if self._creso == ots._creso: return cmp_scalar(self.value, ots.value, op) return self._compare_mismatched_resos(ots, op) @@ -1139,18 +1139,18 @@ cdef class _Timedelta(timedelta): npy_datetimestruct dts_other # dispatch to the datetimestruct utils instead of writing new ones! - pandas_datetime_to_datetimestruct(self.value, self._reso, &dts_self) - pandas_datetime_to_datetimestruct(other.value, other._reso, &dts_other) + pandas_datetime_to_datetimestruct(self.value, self._creso, &dts_self) + pandas_datetime_to_datetimestruct(other.value, other._creso, &dts_other) return cmp_dtstructs(&dts_self, &dts_other, op) cdef bint _has_ns(self): - if self._reso == NPY_FR_ns: + if self._creso == NPY_FR_ns: return self.value % 1000 != 0 - elif self._reso < NPY_FR_ns: + elif self._creso < NPY_FR_ns: # i.e. seconds, millisecond, microsecond return False else: - raise NotImplementedError(self._reso) + raise NotImplementedError(self._creso) cdef _ensure_components(_Timedelta self): """ @@ -1162,7 +1162,7 @@ cdef class _Timedelta(timedelta): cdef: pandas_timedeltastruct tds - pandas_timedelta_to_timedeltastruct(self.value, self._reso, &tds) + pandas_timedelta_to_timedeltastruct(self.value, self._creso, &tds) self._d = tds.days self._h = tds.hrs self._m = tds.min @@ -1194,7 +1194,7 @@ cdef class _Timedelta(timedelta): ----- Any nanosecond resolution will be lost. """ - if self._reso == NPY_FR_ns: + if self._creso == NPY_FR_ns: return timedelta(microseconds=int(self.value) / 1000) # TODO(@WillAyd): is this the right way to use components? @@ -1208,7 +1208,7 @@ cdef class _Timedelta(timedelta): Return a numpy.timedelta64 object with 'ns' precision. """ cdef: - str abbrev = npy_unit_to_abbrev(self._reso) + str abbrev = npy_unit_to_abbrev(self._creso) # TODO: way to create a np.timedelta64 obj with the reso directly # instead of having to get the abbrev? return np.timedelta64(self.value, abbrev) @@ -1548,11 +1548,11 @@ cdef class _Timedelta(timedelta): cdef: int64_t value - if reso == self._reso: + if reso == self._creso: return self try: - value = convert_reso(self.value, self._reso, reso, round_ok=round_ok) + value = convert_reso(self.value, self._creso, reso, round_ok=round_ok) except OverflowError as err: unit = npy_unit_to_abbrev(reso) raise OutOfBoundsTimedelta( @@ -1565,10 +1565,10 @@ cdef class _Timedelta(timedelta): """ If _resos do not match, cast to the higher resolution, raising on overflow. 
""" - if self._reso > other._reso: - other = other._as_creso(self._reso) - elif self._reso < other._reso: - self = self._as_creso(other._reso) + if self._creso > other._creso: + other = other._as_creso(self._creso) + elif self._creso < other._creso: + self = self._as_creso(other._creso) return self, other @@ -1736,7 +1736,7 @@ class Timedelta(_Timedelta): return cls._from_value_and_reso(new_value, reso=new_reso) elif is_tick_object(value): - new_reso = get_supported_reso(value._reso) + new_reso = get_supported_reso(value._creso) new_value = delta_to_nanoseconds(value, reso=new_reso) return cls._from_value_and_reso(new_value, reso=new_reso) @@ -1769,10 +1769,10 @@ class Timedelta(_Timedelta): else: value, reso = state self.value = value - self._reso = reso + self._creso = reso def __reduce__(self): - object_state = self.value, self._reso + object_state = self.value, self._creso return (_timedelta_unpickle, object_state) @cython.cdivision(True) @@ -1784,11 +1784,11 @@ class Timedelta(_Timedelta): from pandas._libs.tslibs.offsets import to_offset to_offset(freq).nanos # raises on non-fixed freq - unit = delta_to_nanoseconds(to_offset(freq), self._reso) + unit = delta_to_nanoseconds(to_offset(freq), self._creso) arr = np.array([self.value], dtype="i8") result = round_nsint64(arr, mode, unit)[0] - return Timedelta._from_value_and_reso(result, self._reso) + return Timedelta._from_value_and_reso(result, self._creso) def round(self, freq): """ @@ -1852,7 +1852,7 @@ class Timedelta(_Timedelta): return _timedelta_from_value_and_reso( (other * self.value), - reso=self._reso, + reso=self._creso, ) elif is_array(other): @@ -1875,7 +1875,7 @@ class Timedelta(_Timedelta): other = Timedelta(other) if other is NaT: return np.nan - if other._reso != self._reso: + if other._creso != self._creso: self, other = self._maybe_cast_to_matching_resos(other) return self.value / float(other.value) @@ -1884,7 +1884,7 @@ class Timedelta(_Timedelta): if util.is_nan(other): return NaT return Timedelta._from_value_and_reso( - (self.value / other), self._reso + (self.value / other), self._creso ) elif is_array(other): @@ -1902,7 +1902,7 @@ class Timedelta(_Timedelta): other = Timedelta(other) if other is NaT: return np.nan - if self._reso != other._reso: + if self._creso != other._creso: self, other = self._maybe_cast_to_matching_resos(other) return float(other.value) / self.value @@ -1930,14 +1930,14 @@ class Timedelta(_Timedelta): other = Timedelta(other) if other is NaT: return np.nan - if self._reso != other._reso: + if self._creso != other._creso: self, other = self._maybe_cast_to_matching_resos(other) return self.value // other.value elif is_integer_object(other) or is_float_object(other): if util.is_nan(other): return NaT - return type(self)._from_value_and_reso(self.value // other, self._reso) + return type(self)._from_value_and_reso(self.value // other, self._creso) elif is_array(other): if other.ndim == 0: @@ -1975,7 +1975,7 @@ class Timedelta(_Timedelta): other = Timedelta(other) if other is NaT: return np.nan - if self._reso != other._reso: + if self._creso != other._creso: self, other = self._maybe_cast_to_matching_resos(other) return other.value // self.value diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 09aa682fd57a5..397df11144d60 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -24,7 +24,7 @@ cdef class _Timestamp(ABCTimestamp): cdef readonly: int64_t value, nanosecond, year BaseOffset _freq - NPY_DATETIMEUNIT _reso + 
NPY_DATETIMEUNIT _creso cdef bint _get_start_end_field(self, str field, freq) cdef _get_date_name_field(self, str field, object locale) diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index 35cca3c905606..e916d7eb12dbf 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -27,7 +27,7 @@ _DatetimeT = TypeVar("_DatetimeT", bound=datetime) def integer_op_not_supported(obj: object) -> TypeError: ... class Timestamp(datetime): - _reso: int + _creso: int min: ClassVar[Timestamp] max: ClassVar[Timestamp] diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 2bcdaadb15771..30ead1d4e3142 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -163,7 +163,7 @@ cdef inline _Timestamp create_timestamp_from_ts( ts_base._freq = freq ts_base.year = dts.year ts_base.nanosecond = dts.ps // 1000 - ts_base._reso = reso + ts_base._creso = reso return ts_base @@ -218,9 +218,9 @@ class MinMaxReso: # i.e. this is on the class, default to nanos return cls(val) elif self._name == "resolution": - return Timedelta._from_value_and_reso(val, obj._reso) + return Timedelta._from_value_and_reso(val, obj._creso) else: - return Timestamp._from_value_and_reso(val, obj._reso, tz=None) + return Timestamp._from_value_and_reso(val, obj._creso, tz=None) def __set__(self, obj, value): raise AttributeError(f"{self._name} is not settable.") @@ -257,9 +257,9 @@ cdef class _Timestamp(ABCTimestamp): @property def _unit(self) -> str: """ - The abbreviation associated with self._reso. + The abbreviation associated with self._creso. """ - return npy_unit_to_abbrev(self._reso) + return npy_unit_to_abbrev(self._creso) # ----------------------------------------------------------------- # Constructors @@ -382,7 +382,7 @@ cdef class _Timestamp(ABCTimestamp): raise TypeError( "Cannot compare tz-naive and tz-aware timestamps" ) - if self._reso == ots._reso: + if self._creso == ots._creso: return cmp_scalar(self.value, ots.value, op) return self._compare_mismatched_resos(ots, op) @@ -394,8 +394,8 @@ cdef class _Timestamp(ABCTimestamp): npy_datetimestruct dts_other # dispatch to the datetimestruct utils instead of writing new ones! - pandas_datetime_to_datetimestruct(self.value, self._reso, &dts_self) - pandas_datetime_to_datetimestruct(other.value, other._reso, &dts_other) + pandas_datetime_to_datetimestruct(self.value, self._creso, &dts_self) + pandas_datetime_to_datetimestruct(other.value, other._creso, &dts_other) return cmp_dtstructs(&dts_self, &dts_other, op) cdef bint _compare_outside_nanorange(_Timestamp self, datetime other, @@ -435,17 +435,17 @@ cdef class _Timestamp(ABCTimestamp): # TODO: share this with __sub__, Timedelta.__add__ # Matching numpy, we cast to the higher resolution. Unlike numpy, # we raise instead of silently overflowing during this casting. 
- if self._reso < other._reso: - self = (<_Timestamp>self)._as_creso(other._reso, round_ok=True) - elif self._reso > other._reso: - other = (<_Timedelta>other)._as_creso(self._reso, round_ok=True) + if self._creso < other._creso: + self = (<_Timestamp>self)._as_creso(other._creso, round_ok=True) + elif self._creso > other._creso: + other = (<_Timedelta>other)._as_creso(self._creso, round_ok=True) nanos = other.value try: new_value = self.value + nanos result = type(self)._from_value_and_reso( - new_value, reso=self._reso, tz=self.tzinfo + new_value, reso=self._creso, tz=self.tzinfo ) except OverflowError as err: # TODO: don't hard-code nanosecond here @@ -524,16 +524,16 @@ cdef class _Timestamp(ABCTimestamp): # Matching numpy, we cast to the higher resolution. Unlike numpy, # we raise instead of silently overflowing during this casting. - if self._reso < other._reso: - self = (<_Timestamp>self)._as_creso(other._reso, round_ok=False) - elif self._reso > other._reso: - other = (<_Timestamp>other)._as_creso(self._reso, round_ok=False) + if self._creso < other._creso: + self = (<_Timestamp>self)._as_creso(other._creso, round_ok=False) + elif self._creso > other._creso: + other = (<_Timestamp>other)._as_creso(self._creso, round_ok=False) # scalar Timestamp/datetime - Timestamp/datetime -> yields a # Timedelta try: res_value = self.value - other.value - return Timedelta._from_value_and_reso(res_value, self._reso) + return Timedelta._from_value_and_reso(res_value, self._creso) except (OverflowError, OutOfBoundsDatetime, OutOfBoundsTimedelta) as err: if isinstance(other, _Timestamp): if both_timestamps: @@ -576,7 +576,7 @@ cdef class _Timestamp(ABCTimestamp): if own_tz is not None and not is_utc(own_tz): pydatetime_to_dtstruct(self, &dts) - val = npy_datetimestruct_to_datetime(self._reso, &dts) + self.nanosecond + val = npy_datetimestruct_to_datetime(self._creso, &dts) + self.nanosecond else: val = self.value return val @@ -600,7 +600,7 @@ cdef class _Timestamp(ABCTimestamp): val = self._maybe_convert_value_to_local() out = get_start_end_field(np.array([val], dtype=np.int64), - field, freqstr, month_kw, self._reso) + field, freqstr, month_kw, self._creso) return out[0] cdef _warn_on_field_deprecation(self, freq, str field): @@ -763,7 +763,7 @@ cdef class _Timestamp(ABCTimestamp): val = self._maybe_convert_value_to_local() out = get_date_name_field(np.array([val], dtype=np.int64), - field, locale=locale, reso=self._reso) + field, locale=locale, reso=self._creso) return out[0] def day_name(self, locale=None) -> str: @@ -912,11 +912,11 @@ cdef class _Timestamp(ABCTimestamp): cdef: local_val = self._maybe_convert_value_to_local() int64_t normalized - int64_t ppd = periods_per_day(self._reso) + int64_t ppd = periods_per_day(self._creso) _Timestamp ts normalized = normalize_i8_stamp(local_val, ppd) - ts = type(self)._from_value_and_reso(normalized, reso=self._reso, tz=None) + ts = type(self)._from_value_and_reso(normalized, reso=self._creso, tz=None) return ts.tz_localize(self.tzinfo) # ----------------------------------------------------------------- @@ -939,10 +939,10 @@ cdef class _Timestamp(ABCTimestamp): reso = NPY_FR_ns else: reso = state[4] - self._reso = reso + self._creso = reso def __reduce__(self): - object_state = self.value, self._freq, self.tzinfo, self._reso + object_state = self.value, self._freq, self.tzinfo, self._creso return (_unpickle_timestamp, object_state) # ----------------------------------------------------------------- @@ -1066,10 +1066,10 @@ cdef class 
_Timestamp(ABCTimestamp): cdef: int64_t value - if reso == self._reso: + if reso == self._creso: return self - value = convert_reso(self.value, self._reso, reso, round_ok=round_ok) + value = convert_reso(self.value, self._creso, reso, round_ok=round_ok) return type(self)._from_value_and_reso(value, reso=reso, tz=self.tzinfo) def _as_unit(self, str unit, bint round_ok=True): @@ -1108,7 +1108,7 @@ cdef class _Timestamp(ABCTimestamp): # GH 17329 # Note: Naive timestamps will not match datetime.stdlib - denom = periods_per_second(self._reso) + denom = periods_per_second(self._creso) return round(self.value / denom, 6) @@ -1142,7 +1142,7 @@ cdef class _Timestamp(ABCTimestamp): Return a numpy.datetime64 object with 'ns' precision. """ # TODO: find a way to construct dt64 directly from _reso - abbrev = npy_unit_to_abbrev(self._reso) + abbrev = npy_unit_to_abbrev(self._creso) return np.datetime64(self.value, abbrev) def to_numpy(self, dtype=None, copy=False) -> np.datetime64: @@ -1682,7 +1682,7 @@ class Timestamp(_Timestamp): int64_t nanos to_offset(freq).nanos # raises on non-fixed freq - nanos = delta_to_nanoseconds(to_offset(freq), self._reso) + nanos = delta_to_nanoseconds(to_offset(freq), self._creso) if self.tz is not None: value = self.tz_localize(None).value @@ -1693,7 +1693,7 @@ class Timestamp(_Timestamp): # Will only ever contain 1 element for timestamp r = round_nsint64(value, mode, nanos)[0] - result = Timestamp._from_value_and_reso(r, self._reso, None) + result = Timestamp._from_value_and_reso(r, self._creso, None) if self.tz is not None: result = result.tz_localize( self.tz, ambiguous=ambiguous, nonexistent=nonexistent @@ -2099,17 +2099,17 @@ default 'raise' value = tz_localize_to_utc_single(self.value, tz, ambiguous=ambiguous, nonexistent=nonexistent, - reso=self._reso) + reso=self._creso) elif tz is None: # reset tz - value = tz_convert_from_utc_single(self.value, self.tz, reso=self._reso) + value = tz_convert_from_utc_single(self.value, self.tz, reso=self._creso) else: raise TypeError( "Cannot localize tz-aware Timestamp, use tz_convert for conversions" ) - out = type(self)._from_value_and_reso(value, self._reso, tz=tz) + out = type(self)._from_value_and_reso(value, self._creso, tz=tz) if out is not NaT: out._set_freq(self._freq) # avoid warning in constructor return out @@ -2164,7 +2164,7 @@ default 'raise' else: # Same UTC timestamp, different time zone tz = maybe_get_tz(tz) - out = type(self)._from_value_and_reso(self.value, reso=self._reso, tz=tz) + out = type(self)._from_value_and_reso(self.value, reso=self._creso, tz=tz) if out is not NaT: out._set_freq(self._freq) # avoid warning in constructor return out @@ -2245,10 +2245,10 @@ default 'raise' fold = self.fold if tzobj is not None: - value = tz_convert_from_utc_single(value, tzobj, reso=self._reso) + value = tz_convert_from_utc_single(value, tzobj, reso=self._creso) # setup components - pandas_datetime_to_datetimestruct(value, self._reso, &dts) + pandas_datetime_to_datetimestruct(value, self._creso, &dts) dts.ps = self.nanosecond * 1000 # replace @@ -2296,10 +2296,10 @@ default 'raise' ts_input = datetime(**kwargs) ts = convert_datetime_to_tsobject( - ts_input, tzobj, nanos=dts.ps // 1000, reso=self._reso + ts_input, tzobj, nanos=dts.ps // 1000, reso=self._creso ) return create_timestamp_from_ts( - ts.value, dts, tzobj, self._freq, fold, reso=self._reso + ts.value, dts, tzobj, self._freq, fold, reso=self._creso ) def to_julian_date(self) -> np.float64: diff --git a/pandas/_libs/tslibs/tzconversion.pxd 
b/pandas/_libs/tslibs/tzconversion.pxd index 13735fb5945a4..3a6a6f4e10035 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -23,7 +23,7 @@ cdef int64_t tz_localize_to_utc_single( cdef class Localizer: cdef: tzinfo tz - NPY_DATETIMEUNIT _reso + NPY_DATETIMEUNIT _creso bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz ndarray trans Py_ssize_t ntrans diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 28ebce9724da9..953ba10993973 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -66,7 +66,7 @@ cdef class Localizer: @cython.boundscheck(False) def __cinit__(self, tzinfo tz, NPY_DATETIMEUNIT reso): self.tz = tz - self._reso = reso + self._creso = reso self.use_utc = self.use_tzlocal = self.use_fixed = False self.use_dst = self.use_pytz = False self.ntrans = -1 # placeholder @@ -121,7 +121,7 @@ cdef class Localizer: return utc_val elif self.use_tzlocal: return utc_val + _tz_localize_using_tzinfo_api( - utc_val, self.tz, to_utc=False, reso=self._reso, fold=fold + utc_val, self.tz, to_utc=False, reso=self._creso, fold=fold ) elif self.use_fixed: return utc_val + self.delta diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ebd440d4b0d24..bcf4b5d58bf74 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -440,7 +440,7 @@ def astype(self, dtype, copy: bool = True): tz=self.tz, freq=self.freq, box="timestamp", - reso=self._reso, + reso=self._creso, ) return converted @@ -1079,7 +1079,7 @@ def _cmp_method(self, other, op): if not is_period_dtype(self.dtype): self = cast(TimelikeOps, self) - if self._reso != other._reso: + if self._creso != other._creso: if not isinstance(other, type(self)): # i.e. Timedelta/Timestamp, cast to ndarray and let # compare_mismatched_resolutions handle broadcasting @@ -2039,7 +2039,7 @@ def _validate_dtype(cls, values, dtype): # -------------------------------------------------------------- @cache_readonly - def _reso(self) -> int: + def _creso(self) -> int: return get_unit_from_dtype(self._ndarray.dtype) @cache_readonly @@ -2068,9 +2068,9 @@ def _as_unit(self: TimelikeOpsT, unit: str) -> TimelikeOpsT: # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta # with the return type matching input type. TypeVar? 
def _ensure_matching_resos(self, other): - if self._reso != other._reso: + if self._creso != other._creso: # Just as with Timestamp/Timedelta, we cast to the higher resolution - if self._reso < other._reso: + if self._creso < other._creso: self = self._as_unit(other._unit) else: other = other._as_unit(self._unit) @@ -2103,7 +2103,7 @@ def _round(self, freq, mode, ambiguous, nonexistent): values = self.view("i8") values = cast(np.ndarray, values) nanos = to_offset(freq).nanos # raises on non-fixed frequencies - nanos = delta_to_nanoseconds(to_offset(freq), self._reso) + nanos = delta_to_nanoseconds(to_offset(freq), self._creso) result_i8 = round_nsint64(values, mode, nanos) result = self._maybe_mask_results(result_i8, fill_value=iNaT) result = result.view(self._ndarray.dtype) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index fb1a5070c6c0d..9ecd9473c903b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -131,20 +131,20 @@ def f(self): month_kw = kwds.get("startingMonth", kwds.get("month", 12)) result = fields.get_start_end_field( - values, field, self.freqstr, month_kw, reso=self._reso + values, field, self.freqstr, month_kw, reso=self._creso ) else: - result = fields.get_date_field(values, field, reso=self._reso) + result = fields.get_date_field(values, field, reso=self._creso) # these return a boolean by-definition return result if field in self._object_ops: - result = fields.get_date_name_field(values, field, reso=self._reso) + result = fields.get_date_name_field(values, field, reso=self._creso) result = self._maybe_mask_results(result, fill_value=None) else: - result = fields.get_date_field(values, field, reso=self._reso) + result = fields.get_date_field(values, field, reso=self._creso) result = self._maybe_mask_results( result, fill_value=None, convert="float64" ) @@ -283,7 +283,7 @@ def _simple_new( # type: ignore[override] else: # DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC], # then values.dtype should be M8[us]. 
- assert dtype._reso == get_unit_from_dtype(values.dtype) + assert dtype._creso == get_unit_from_dtype(values.dtype) result = super()._simple_new(values, dtype) result._freq = freq @@ -510,7 +510,7 @@ def _check_compatible_with(self, other, setitem: bool = False): def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: # GH#42228 value = x.view("i8") - ts = Timestamp._from_value_and_reso(value, reso=self._reso, tz=self.tz) + ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz) # Non-overlapping identity check (left operand type: "Timestamp", # right operand type: "NaTType") if ts is not NaT: # type: ignore[comparison-overlap] @@ -577,11 +577,11 @@ def is_normalized(self) -> bool: """ Returns True if all of the dates are at midnight ("no time") """ - return is_date_array_normalized(self.asi8, self.tz, reso=self._reso) + return is_date_array_normalized(self.asi8, self.tz, reso=self._creso) @property # NB: override with cache_readonly in immutable subclasses def _resolution_obj(self) -> Resolution: - return get_resolution(self.asi8, self.tz, reso=self._reso) + return get_resolution(self.asi8, self.tz, reso=self._creso) # ---------------------------------------------------------------- # Array-Like / EA-Interface Methods @@ -619,7 +619,7 @@ def __iter__(self) -> Iterator: tz=self.tz, freq=self.freq, box="timestamp", - reso=self._reso, + reso=self._creso, ) yield from converted @@ -687,7 +687,7 @@ def _format_native_types( fmt = get_format_datetime64_from_values(self, date_format) return tslib.format_array_from_datetime( - self.asi8, tz=self.tz, format=fmt, na_rep=na_rep, reso=self._reso + self.asi8, tz=self.tz, format=fmt, na_rep=na_rep, reso=self._creso ) # ----------------------------------------------------------------- @@ -773,7 +773,7 @@ def _local_timestamps(self) -> npt.NDArray[np.int64]: if self.tz is None or timezones.is_utc(self.tz): # Avoid the copy that would be made in tzconversion return self.asi8 - return tz_convert_from_utc(self.asi8, self.tz, reso=self._reso) + return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) def tz_convert(self, tz) -> DatetimeArray: """ @@ -1021,7 +1021,7 @@ def tz_localize( tz, ambiguous=ambiguous, nonexistent=nonexistent, - reso=self._reso, + reso=self._creso, ) new_dates = new_dates.view(f"M8[{self._unit}]") dtype = tz_to_dtype(tz, unit=self._unit) @@ -1047,7 +1047,7 @@ def to_pydatetime(self) -> npt.NDArray[np.object_]: ------- datetimes : ndarray[object] """ - return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._reso) + return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso) def normalize(self) -> DatetimeArray: """ @@ -1087,7 +1087,7 @@ def normalize(self) -> DatetimeArray: '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ - new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._reso) + new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso) dt64_values = new_values.view(self._ndarray.dtype) dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype) @@ -1242,7 +1242,7 @@ def month_name(self, locale=None) -> npt.NDArray[np.object_]: values = self._local_timestamps() result = fields.get_date_name_field( - values, "month_name", locale=locale, reso=self._reso + values, "month_name", locale=locale, reso=self._creso ) result = self._maybe_mask_results(result, fill_value=None) return result @@ -1286,7 +1286,7 @@ def day_name(self, locale=None) -> npt.NDArray[np.object_]: values = self._local_timestamps() result = 
fields.get_date_name_field( - values, "day_name", locale=locale, reso=self._reso + values, "day_name", locale=locale, reso=self._creso ) result = self._maybe_mask_results(result, fill_value=None) return result @@ -1303,7 +1303,7 @@ def time(self) -> npt.NDArray[np.object_]: # keeping their timezone and not using UTC timestamps = self._local_timestamps() - return ints_to_pydatetime(timestamps, box="time", reso=self._reso) + return ints_to_pydatetime(timestamps, box="time", reso=self._creso) @property def timetz(self) -> npt.NDArray[np.object_]: @@ -1312,7 +1312,7 @@ def timetz(self) -> npt.NDArray[np.object_]: The time part of the Timestamps. """ - return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._reso) + return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso) @property def date(self) -> npt.NDArray[np.object_]: @@ -1327,7 +1327,7 @@ def date(self) -> npt.NDArray[np.object_]: # keeping their timezone and not using UTC timestamps = self._local_timestamps() - return ints_to_pydatetime(timestamps, box="date", reso=self._reso) + return ints_to_pydatetime(timestamps, box="date", reso=self._creso) def isocalendar(self) -> DataFrame: """ @@ -1366,7 +1366,7 @@ def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() - sarray = fields.build_isocalendar_sarray(values, reso=self._reso) + sarray = fields.build_isocalendar_sarray(values, reso=self._creso) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 74cc9e50f97bb..92b9222cfc9bc 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -73,7 +73,7 @@ def _field_accessor(name: str, alias: str, docstring: str): def f(self) -> np.ndarray: values = self.asi8 - result = get_timedelta_field(values, alias, reso=self._reso) + result = get_timedelta_field(values, alias, reso=self._creso) if self._hasna: result = self._maybe_mask_results( result, fill_value=None, convert="float64" @@ -149,7 +149,7 @@ def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType: y = x.view("i8") if y == NaT.value: return NaT - return Timedelta._from_value_and_reso(y, reso=self._reso) + return Timedelta._from_value_and_reso(y, reso=self._creso) @property # error: Return type "dtype" of "dtype" incompatible with return type @@ -795,7 +795,7 @@ def total_seconds(self) -> npt.NDArray[np.float64]: Float64Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64') """ - pps = periods_per_second(self._reso) + pps = periods_per_second(self._creso) return self._maybe_mask_results(self.asi8 / pps, fill_value=None) def to_pytimedelta(self) -> npt.NDArray[np.object_]: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 03025ce342a57..cd1753bc8fec1 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -713,7 +713,7 @@ def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: self._tz = tz @cache_readonly - def _reso(self) -> int: + def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. 
""" diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index bb9d9f69ed38c..247126227c587 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -500,7 +500,7 @@ def _get_time_micros(self) -> npt.NDArray[np.int64]: """ values = self._data._local_timestamps() - reso = self._data._reso + reso = self._data._creso ppd = periods_per_day(reso) frac = values % ppd diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index dfe41896c2665..b27d90e43d860 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -71,7 +71,7 @@ def test_non_nano(self, unit, reso, dtype): dta = DatetimeArray._simple_new(arr, dtype=dtype) assert dta.dtype == dtype - assert dta[0]._reso == reso + assert dta[0]._creso == reso assert tz_compare(dta.tz, dta[0].tz) assert (dta[0] == dta[:1]).all() @@ -124,7 +124,7 @@ def test_std_non_nano(self, unit): # we should match the nano-reso std, but floored to our reso. res = dta.std() - assert res._reso == dta._reso + assert res._creso == dta._creso assert res == dti.std().floor(unit) @pytest.mark.filterwarnings("ignore:Converting to PeriodArray.*:UserWarning") @@ -141,12 +141,12 @@ def test_iter(self, dta): assert type(res) is pd.Timestamp assert res.value == expected.value - assert res._reso == expected._reso + assert res._creso == expected._creso assert res == expected def test_astype_object(self, dta): result = dta.astype(object) - assert all(x._reso == dta._reso for x in result) + assert all(x._creso == dta._creso for x in result) assert all(x == y for x, y in zip(result, dta)) def test_to_pydatetime(self, dta_dti): @@ -240,7 +240,7 @@ def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar): dta, dti = dta_dti td = pd.Timedelta(scalar) - exp_reso = max(dta._reso, td._reso) + exp_reso = max(dta._creso, td._creso) exp_unit = npy_unit_to_abbrev(exp_reso) expected = (dti + td)._data._as_unit(exp_unit) diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 6c48ee3b6405e..75dff66a91365 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -40,7 +40,7 @@ def test_non_nano(self, unit, reso): tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype) assert tda.dtype == arr.dtype - assert tda[0]._reso == reso + assert tda[0]._creso == reso @pytest.mark.parametrize("field", TimedeltaArray._field_ops) def test_fields(self, tda, field): @@ -80,23 +80,23 @@ def test_timedelta_array_total_seconds(self): def test_add_nat_datetimelike_scalar(self, nat, tda): result = tda + nat assert isinstance(result, DatetimeArray) - assert result._reso == tda._reso + assert result._creso == tda._creso assert result.isna().all() result = nat + tda assert isinstance(result, DatetimeArray) - assert result._reso == tda._reso + assert result._creso == tda._creso assert result.isna().all() def test_add_pdnat(self, tda): result = tda + pd.NaT assert isinstance(result, TimedeltaArray) - assert result._reso == tda._reso + assert result._creso == tda._creso assert result.isna().all() result = pd.NaT + tda assert isinstance(result, TimedeltaArray) - assert result._reso == tda._reso + assert result._creso == tda._creso assert result.isna().all() # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert @@ -130,28 +130,28 @@ def test_mul_scalar(self, tda): result = tda * other expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) 
tm.assert_extension_array_equal(result, expected) - assert result._reso == tda._reso + assert result._creso == tda._creso def test_mul_listlike(self, tda): other = np.arange(len(tda)) result = tda * other expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) tm.assert_extension_array_equal(result, expected) - assert result._reso == tda._reso + assert result._creso == tda._creso def test_mul_listlike_object(self, tda): other = np.arange(len(tda)) result = tda * other.astype(object) expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) tm.assert_extension_array_equal(result, expected) - assert result._reso == tda._reso + assert result._creso == tda._creso def test_div_numeric_scalar(self, tda): other = 2 result = tda / other expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) tm.assert_extension_array_equal(result, expected) - assert result._reso == tda._reso + assert result._creso == tda._creso def test_div_td_scalar(self, tda): other = timedelta(seconds=1) @@ -164,7 +164,7 @@ def test_div_numeric_array(self, tda): result = tda / other expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) tm.assert_extension_array_equal(result, expected) - assert result._reso == tda._reso + assert result._creso == tda._creso def test_div_td_array(self, tda): other = tda._ndarray + tda._ndarray[-1] diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index aeae5fec481ec..7f6ec8b328c87 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -268,7 +268,7 @@ def test_hash_vs_equality(self, dtype): def test_construction_non_nanosecond(self): res = DatetimeTZDtype("ms", "US/Eastern") assert res.unit == "ms" - assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value assert res.str == "|M8[ms]" assert str(res) == "datetime64[ms, US/Eastern]" diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py index 4e1d1c696b25c..7540813fd302b 100644 --- a/pandas/tests/scalar/timedelta/test_constructors.py +++ b/pandas/tests/scalar/timedelta/test_constructors.py @@ -45,12 +45,12 @@ def test_from_td64_retain_resolution(): td = Timedelta(obj) assert td.value == obj.view("i8") - assert td._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert td._creso == NpyDatetimeUnit.NPY_FR_ms.value # Case where we cast to nearest-supported reso obj2 = np.timedelta64(1234, "D") td2 = Timedelta(obj2) - assert td2._reso == NpyDatetimeUnit.NPY_FR_s.value + assert td2._creso == NpyDatetimeUnit.NPY_FR_s.value assert td2 == obj2 assert td2.days == 1234 @@ -58,7 +58,7 @@ def test_from_td64_retain_resolution(): obj3 = np.timedelta64(1000000000000000000, "us") td3 = Timedelta(obj3) assert td3.total_seconds() == 1000000000000 - assert td3._reso == NpyDatetimeUnit.NPY_FR_us.value + assert td3._creso == NpyDatetimeUnit.NPY_FR_us.value def test_from_pytimedelta_us_reso(): @@ -66,31 +66,31 @@ def test_from_pytimedelta_us_reso(): td = timedelta(days=4, minutes=3) result = Timedelta(td) assert result.to_pytimedelta() == td - assert result._reso == NpyDatetimeUnit.NPY_FR_us.value + assert result._creso == NpyDatetimeUnit.NPY_FR_us.value def test_from_tick_reso(): tick = offsets.Nano() - assert Timedelta(tick)._reso == NpyDatetimeUnit.NPY_FR_ns.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ns.value tick = offsets.Micro() - assert Timedelta(tick)._reso == 
NpyDatetimeUnit.NPY_FR_us.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_us.value tick = offsets.Milli() - assert Timedelta(tick)._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ms.value tick = offsets.Second() - assert Timedelta(tick)._reso == NpyDatetimeUnit.NPY_FR_s.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value # everything above Second gets cast to the closest supported reso: second tick = offsets.Minute() - assert Timedelta(tick)._reso == NpyDatetimeUnit.NPY_FR_s.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value tick = offsets.Hour() - assert Timedelta(tick)._reso == NpyDatetimeUnit.NPY_FR_s.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value tick = offsets.Day() - assert Timedelta(tick)._reso == NpyDatetimeUnit.NPY_FR_s.value + assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value def test_construction(): @@ -282,7 +282,7 @@ def test_overflow_on_construction(): # used to overflow before non-ns support td = Timedelta(timedelta(days=13 * 19999)) - assert td._reso == NpyDatetimeUnit.NPY_FR_us.value + assert td._creso == NpyDatetimeUnit.NPY_FR_us.value assert td.days == 13 * 19999 diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 295222320020d..e7f97a7269aa3 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -34,27 +34,27 @@ def test_as_unit(self): res = td._as_unit("us") assert res.value == td.value // 1000 - assert res._reso == NpyDatetimeUnit.NPY_FR_us.value + assert res._creso == NpyDatetimeUnit.NPY_FR_us.value rt = res._as_unit("ns") assert rt.value == td.value - assert rt._reso == td._reso + assert rt._creso == td._creso res = td._as_unit("ms") assert res.value == td.value // 1_000_000 - assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value rt = res._as_unit("ns") assert rt.value == td.value - assert rt._reso == td._reso + assert rt._creso == td._creso res = td._as_unit("s") assert res.value == td.value // 1_000_000_000 - assert res._reso == NpyDatetimeUnit.NPY_FR_s.value + assert res._creso == NpyDatetimeUnit.NPY_FR_s.value rt = res._as_unit("ns") assert rt.value == td.value - assert rt._reso == td._reso + assert rt._creso == td._creso def test_as_unit_overflows(self): # microsecond that would be just out of bounds for nano @@ -67,7 +67,7 @@ def test_as_unit_overflows(self): res = td._as_unit("ms") assert res.value == us // 1000 - assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value def test_as_unit_rounding(self): td = Timedelta(microseconds=1500) @@ -76,7 +76,7 @@ def test_as_unit_rounding(self): expected = Timedelta(milliseconds=1) assert res == expected - assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value assert res.value == 1 with pytest.raises(ValueError, match="Cannot losslessly convert units"): @@ -131,26 +131,26 @@ def test_from_value_and_reso(self, unit, val): # Just checking that the fixture is giving us what we asked for td = Timedelta._from_value_and_reso(val, unit) assert td.value == val - assert td._reso == unit + assert td._creso == unit assert td.days == 106752 def test_unary_non_nano(self, td, unit): - assert abs(td)._reso == unit - assert (-td)._reso == unit - assert (+td)._reso == unit + assert abs(td)._creso == unit + assert 
(-td)._creso == unit + assert (+td)._creso == unit def test_sub_preserves_reso(self, td, unit): res = td - td expected = Timedelta._from_value_and_reso(0, unit) assert res == expected - assert res._reso == unit + assert res._creso == unit def test_mul_preserves_reso(self, td, unit): # The td fixture should always be far from the implementation # bound, so doubling does not risk overflow. res = td * 2 assert res.value == td.value * 2 - assert res._reso == unit + assert res._creso == unit def test_cmp_cross_reso(self, td): # numpy gets this wrong because of silent overflow @@ -208,11 +208,11 @@ def test_truediv_numeric(self, td): res = td / 2 assert res.value == td.value / 2 - assert res._reso == td._reso + assert res._creso == td._creso res = td / 2.0 assert res.value == td.value / 2 - assert res._reso == td._reso + assert res._creso == td._creso def test_floordiv_timedeltalike(self, td): assert td // td == 1 @@ -242,21 +242,21 @@ def test_floordiv_numeric(self, td): res = td // 2 assert res.value == td.value // 2 - assert res._reso == td._reso + assert res._creso == td._creso res = td // 2.0 assert res.value == td.value // 2 - assert res._reso == td._reso + assert res._creso == td._creso assert td // np.array(np.nan) is NaT res = td // np.array(2) assert res.value == td.value // 2 - assert res._reso == td._reso + assert res._creso == td._creso res = td // np.array(2.0) assert res.value == td.value // 2 - assert res._reso == td._reso + assert res._creso == td._creso def test_addsub_mismatched_reso(self, td): # need to cast to since td is out of bounds for ns, so @@ -265,19 +265,19 @@ def test_addsub_mismatched_reso(self, td): # td is out of bounds for ns result = td + other - assert result._reso == other._reso + assert result._creso == other._creso assert result.days == td.days + 1 result = other + td - assert result._reso == other._reso + assert result._creso == other._creso assert result.days == td.days + 1 result = td - other - assert result._reso == other._reso + assert result._creso == other._creso assert result.days == td.days - 1 result = other - td - assert result._reso == other._reso + assert result._creso == other._creso assert result.days == 1 - td.days other2 = Timedelta(500) @@ -293,32 +293,32 @@ def test_addsub_mismatched_reso(self, td): def test_min(self, td): assert td.min <= td - assert td.min._reso == td._reso + assert td.min._creso == td._creso assert td.min.value == NaT.value + 1 def test_max(self, td): assert td.max >= td - assert td.max._reso == td._reso + assert td.max._creso == td._creso assert td.max.value == np.iinfo(np.int64).max def test_resolution(self, td): - expected = Timedelta._from_value_and_reso(1, td._reso) + expected = Timedelta._from_value_and_reso(1, td._creso) result = td.resolution assert result == expected - assert result._reso == expected._reso + assert result._creso == expected._creso def test_timedelta_class_min_max_resolution(): # when accessed on the class (as opposed to an instance), we default # to nanoseconds assert Timedelta.min == Timedelta(NaT.value + 1) - assert Timedelta.min._reso == NpyDatetimeUnit.NPY_FR_ns.value + assert Timedelta.min._creso == NpyDatetimeUnit.NPY_FR_ns.value assert Timedelta.max == Timedelta(np.iinfo(np.int64).max) - assert Timedelta.max._reso == NpyDatetimeUnit.NPY_FR_ns.value + assert Timedelta.max._creso == NpyDatetimeUnit.NPY_FR_ns.value assert Timedelta.resolution == Timedelta(1) - assert Timedelta.resolution._reso == NpyDatetimeUnit.NPY_FR_ns.value + assert Timedelta.resolution._creso == 
NpyDatetimeUnit.NPY_FR_ns.value class TestTimedeltaUnaryOps: @@ -759,15 +759,15 @@ def test_round_non_nano(self, unit): res = td.round("min") assert res == Timedelta("1 days 02:35:00") - assert res._reso == td._reso + assert res._creso == td._creso res = td.floor("min") assert res == Timedelta("1 days 02:34:00") - assert res._reso == td._reso + assert res._creso == td._creso res = td.ceil("min") assert res == Timedelta("1 days 02:35:00") - assert res._reso == td._reso + assert res._creso == td._creso def test_identity(self): diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 9b7d8d82a9b98..757abbf3e662c 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -458,8 +458,8 @@ def test_out_of_bounds_value(self): # We used to raise on these before supporting non-nano us_val = NpyDatetimeUnit.NPY_FR_us.value - assert Timestamp(min_ts_us - one_us)._reso == us_val - assert Timestamp(max_ts_us + one_us)._reso == us_val + assert Timestamp(min_ts_us - one_us)._creso == us_val + assert Timestamp(max_ts_us + one_us)._creso == us_val # https://github.com/numpy/numpy/issues/22346 for why # we can't use the same construction as above with minute resolution @@ -506,7 +506,7 @@ def test_bounds_with_different_units(self): assert ts.value == dt64.view("i8") else: # we chose the closest unit that we _do_ support - assert ts._reso == NpyDatetimeUnit.NPY_FR_s.value + assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value # With more extreme cases, we can't even fit inside second resolution info = np.iinfo(np.int64) diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 61663f774ced0..4f8c6fcc57186 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -716,17 +716,17 @@ def ts(self, dt64): @pytest.fixture def ts_tz(self, ts, tz_aware_fixture): tz = maybe_get_tz(tz_aware_fixture) - return Timestamp._from_value_and_reso(ts.value, ts._reso, tz) + return Timestamp._from_value_and_reso(ts.value, ts._creso, tz) def test_non_nano_construction(self, dt64, ts, reso): assert ts.value == dt64.view("i8") if reso == "s": - assert ts._reso == NpyDatetimeUnit.NPY_FR_s.value + assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value elif reso == "ms": - assert ts._reso == NpyDatetimeUnit.NPY_FR_ms.value + assert ts._creso == NpyDatetimeUnit.NPY_FR_ms.value elif reso == "us": - assert ts._reso == NpyDatetimeUnit.NPY_FR_us.value + assert ts._creso == NpyDatetimeUnit.NPY_FR_us.value def test_non_nano_fields(self, dt64, ts): alt = Timestamp(dt64) @@ -771,13 +771,13 @@ def test_month_name(self, dt64, ts): assert ts.month_name() == alt.month_name() def test_tz_convert(self, ts): - ts = Timestamp._from_value_and_reso(ts.value, ts._reso, utc) + ts = Timestamp._from_value_and_reso(ts.value, ts._creso, utc) tz = pytz.timezone("US/Pacific") result = ts.tz_convert(tz) assert isinstance(result, Timestamp) - assert result._reso == ts._reso + assert result._creso == ts._creso assert tz_compare(result.tz, tz) def test_repr(self, dt64, ts): @@ -845,15 +845,15 @@ def test_cmp_cross_reso_reversed_dt64(self): def test_pickle(self, ts, tz_aware_fixture): tz = tz_aware_fixture tz = maybe_get_tz(tz) - ts = Timestamp._from_value_and_reso(ts.value, ts._reso, tz) + ts = Timestamp._from_value_and_reso(ts.value, ts._creso, tz) rt = tm.round_trip_pickle(ts) - assert rt._reso == ts._reso + 
assert rt._creso == ts._creso assert rt == ts def test_normalize(self, dt64, ts): alt = Timestamp(dt64) result = ts.normalize() - assert result._reso == ts._reso + assert result._creso == ts._creso assert result == alt.normalize() def test_asm8(self, dt64, ts): @@ -884,24 +884,24 @@ def test_to_period(self, dt64, ts): ) def test_addsub_timedeltalike_non_nano(self, dt64, ts, td): - exp_reso = max(ts._reso, Timedelta(td)._reso) + exp_reso = max(ts._creso, Timedelta(td)._creso) result = ts - td expected = Timestamp(dt64) - td assert isinstance(result, Timestamp) - assert result._reso == exp_reso + assert result._creso == exp_reso assert result == expected result = ts + td expected = Timestamp(dt64) + td assert isinstance(result, Timestamp) - assert result._reso == exp_reso + assert result._creso == exp_reso assert result == expected result = td + ts expected = td + Timestamp(dt64) assert isinstance(result, Timestamp) - assert result._reso == exp_reso + assert result._creso == exp_reso assert result == expected def test_addsub_offset(self, ts_tz): @@ -910,7 +910,7 @@ def test_addsub_offset(self, ts_tz): result = ts_tz + off assert isinstance(result, Timestamp) - assert result._reso == ts_tz._reso + assert result._creso == ts_tz._creso if ts_tz.month == 12 and ts_tz.day == 31: assert result.year == ts_tz.year + 1 else: @@ -922,7 +922,7 @@ def test_addsub_offset(self, ts_tz): result = ts_tz - off assert isinstance(result, Timestamp) - assert result._reso == ts_tz._reso + assert result._creso == ts_tz._creso assert result.year == ts_tz.year - 1 assert result.day == 31 assert result.month == 12 @@ -933,101 +933,101 @@ def test_sub_datetimelike_mismatched_reso(self, ts_tz): ts = ts_tz # choose a unit for `other` that doesn't match ts_tz's; - # this construction ensures we get cases with other._reso < ts._reso - # and cases with other._reso > ts._reso + # this construction ensures we get cases with other._creso < ts._creso + # and cases with other._creso > ts._creso unit = { NpyDatetimeUnit.NPY_FR_us.value: "ms", NpyDatetimeUnit.NPY_FR_ms.value: "s", NpyDatetimeUnit.NPY_FR_s.value: "us", - }[ts._reso] + }[ts._creso] other = ts._as_unit(unit) - assert other._reso != ts._reso + assert other._creso != ts._creso result = ts - other assert isinstance(result, Timedelta) assert result.value == 0 - assert result._reso == max(ts._reso, other._reso) + assert result._creso == max(ts._creso, other._creso) result = other - ts assert isinstance(result, Timedelta) assert result.value == 0 - assert result._reso == max(ts._reso, other._reso) + assert result._creso == max(ts._creso, other._creso) - if ts._reso < other._reso: + if ts._creso < other._creso: # Case where rounding is lossy - other2 = other + Timedelta._from_value_and_reso(1, other._reso) + other2 = other + Timedelta._from_value_and_reso(1, other._creso) exp = ts._as_unit(other._unit) - other2 res = ts - other2 assert res == exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) res = other2 - ts assert res == -exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) else: - ts2 = ts + Timedelta._from_value_and_reso(1, ts._reso) + ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso) exp = ts2 - other._as_unit(ts2._unit) res = ts2 - other assert res == exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) res = other - ts2 assert res == -exp - assert res._reso == max(ts._reso, other._reso) + assert 
res._creso == max(ts._creso, other._creso) def test_sub_timedeltalike_mismatched_reso(self, ts_tz): # case with non-lossy rounding ts = ts_tz # choose a unit for `other` that doesn't match ts_tz's; - # this construction ensures we get cases with other._reso < ts._reso - # and cases with other._reso > ts._reso + # this construction ensures we get cases with other._creso < ts._creso + # and cases with other._creso > ts._creso unit = { NpyDatetimeUnit.NPY_FR_us.value: "ms", NpyDatetimeUnit.NPY_FR_ms.value: "s", NpyDatetimeUnit.NPY_FR_s.value: "us", - }[ts._reso] + }[ts._creso] other = Timedelta(0)._as_unit(unit) - assert other._reso != ts._reso + assert other._creso != ts._creso result = ts + other assert isinstance(result, Timestamp) assert result == ts - assert result._reso == max(ts._reso, other._reso) + assert result._creso == max(ts._creso, other._creso) result = other + ts assert isinstance(result, Timestamp) assert result == ts - assert result._reso == max(ts._reso, other._reso) + assert result._creso == max(ts._creso, other._creso) - if ts._reso < other._reso: + if ts._creso < other._creso: # Case where rounding is lossy - other2 = other + Timedelta._from_value_and_reso(1, other._reso) + other2 = other + Timedelta._from_value_and_reso(1, other._creso) exp = ts._as_unit(other._unit) + other2 res = ts + other2 assert res == exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) res = other2 + ts assert res == exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) else: - ts2 = ts + Timedelta._from_value_and_reso(1, ts._reso) + ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso) exp = ts2 + other._as_unit(ts2._unit) res = ts2 + other assert res == exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) res = other + ts2 assert res == exp - assert res._reso == max(ts._reso, other._reso) + assert res._creso == max(ts._creso, other._creso) def test_addition_doesnt_downcast_reso(self): # https://github.com/pandas-dev/pandas/pull/48748#pullrequestreview-1122635413 ts = Timestamp(year=2022, month=1, day=1, microsecond=999999)._as_unit("us") td = Timedelta(microseconds=1)._as_unit("us") res = ts + td - assert res._reso == ts._reso + assert res._creso == ts._creso def test_sub_timedelta64_mismatched_reso(self, ts_tz): ts = ts_tz @@ -1035,36 +1035,36 @@ def test_sub_timedelta64_mismatched_reso(self, ts_tz): res = ts + np.timedelta64(1, "ns") exp = ts._as_unit("ns") + np.timedelta64(1, "ns") assert exp == res - assert exp._reso == NpyDatetimeUnit.NPY_FR_ns.value + assert exp._creso == NpyDatetimeUnit.NPY_FR_ns.value def test_min(self, ts): assert ts.min <= ts - assert ts.min._reso == ts._reso + assert ts.min._creso == ts._creso assert ts.min.value == NaT.value + 1 def test_max(self, ts): assert ts.max >= ts - assert ts.max._reso == ts._reso + assert ts.max._creso == ts._creso assert ts.max.value == np.iinfo(np.int64).max def test_resolution(self, ts): - expected = Timedelta._from_value_and_reso(1, ts._reso) + expected = Timedelta._from_value_and_reso(1, ts._creso) result = ts.resolution assert result == expected - assert result._reso == expected._reso + assert result._creso == expected._creso def test_timestamp_class_min_max_resolution(): # when accessed on the class (as opposed to an instance), we default # to nanoseconds assert Timestamp.min == Timestamp(NaT.value + 1) - assert Timestamp.min._reso == NpyDatetimeUnit.NPY_FR_ns.value + 
     assert Timestamp.max == Timestamp(np.iinfo(np.int64).max)
-    assert Timestamp.max._reso == NpyDatetimeUnit.NPY_FR_ns.value
+    assert Timestamp.max._creso == NpyDatetimeUnit.NPY_FR_ns.value

     assert Timestamp.resolution == Timedelta(1)
-    assert Timestamp.resolution._reso == NpyDatetimeUnit.NPY_FR_ns.value
+    assert Timestamp.resolution._creso == NpyDatetimeUnit.NPY_FR_ns.value


 class TestAsUnit:
@@ -1075,27 +1075,27 @@ def test_as_unit(self):
         res = ts._as_unit("us")
         assert res.value == ts.value // 1000
-        assert res._reso == NpyDatetimeUnit.NPY_FR_us.value
+        assert res._creso == NpyDatetimeUnit.NPY_FR_us.value

         rt = res._as_unit("ns")
         assert rt.value == ts.value
-        assert rt._reso == ts._reso
+        assert rt._creso == ts._creso

         res = ts._as_unit("ms")
         assert res.value == ts.value // 1_000_000
-        assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value
+        assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value

         rt = res._as_unit("ns")
         assert rt.value == ts.value
-        assert rt._reso == ts._reso
+        assert rt._creso == ts._creso

         res = ts._as_unit("s")
         assert res.value == ts.value // 1_000_000_000
-        assert res._reso == NpyDatetimeUnit.NPY_FR_s.value
+        assert res._creso == NpyDatetimeUnit.NPY_FR_s.value

         rt = res._as_unit("ns")
         assert rt.value == ts.value
-        assert rt._reso == ts._reso
+        assert rt._creso == ts._creso

     def test_as_unit_overflows(self):
         # microsecond that would be just out of bounds for nano
@@ -1108,7 +1108,7 @@ def test_as_unit_overflows(self):

         res = ts._as_unit("ms")
         assert res.value == us // 1000
-        assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value
+        assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value

     def test_as_unit_rounding(self):
         ts = Timestamp(1_500_000)  # i.e. 1500 microseconds
@@ -1117,7 +1117,7 @@ def test_as_unit_rounding(self):
         expected = Timestamp(1_000_000)  # i.e. 1 millisecond
         assert res == expected

-        assert res._reso == NpyDatetimeUnit.NPY_FR_ms.value
+        assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
         assert res.value == 1

         with pytest.raises(ValueError, match="Cannot losslessly convert units"):
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index 874575fa9ad4c..a05da73ac3031 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -72,11 +72,11 @@ def test_tz_localize_ambiguous_bool(self, unit):

         result = ts.tz_localize("US/Central", ambiguous=True)
         assert result == expected0
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

         result = ts.tz_localize("US/Central", ambiguous=False)
         assert result == expected1
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

     def test_tz_localize_ambiguous(self):
         ts = Timestamp("2014-11-02 01:00")
@@ -270,7 +270,7 @@ def test_timestamp_tz_localize_nonexistent_shift(
             assert result == expected.replace(microsecond=0, nanosecond=0)
         else:
             assert result == expected
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

     @pytest.mark.parametrize("offset", [-1, 1])
     @pytest.mark.parametrize("tz_type", ["", "dateutil/"])
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 9c376c7a13efc..6d9cfa51d2210 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -154,7 +154,7 @@ def test_ceil(self, unit):
         result = dt.ceil("D")
         expected = Timestamp("20130102")
         assert result == expected
-        assert result._reso == dt._reso
+        assert result._creso == dt._creso

     @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
     def test_floor(self, unit):
@@ -162,7 +162,7 @@ def test_floor(self, unit):
         result = dt.floor("D")
         expected = Timestamp("20130101")
         assert result == expected
-        assert result._reso == dt._reso
+        assert result._creso == dt._creso

     @pytest.mark.parametrize("method", ["ceil", "round", "floor"])
     @pytest.mark.parametrize(
@@ -176,14 +176,14 @@ def test_round_dst_border_ambiguous(self, method, unit):
         #
         result = getattr(ts, method)("H", ambiguous=True)
         assert result == ts
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

         result = getattr(ts, method)("H", ambiguous=False)
         expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert(
             "Europe/Madrid"
         )
         assert result == expected
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

         result = getattr(ts, method)("H", ambiguous="NaT")
         assert result is NaT
@@ -210,7 +210,7 @@ def test_round_dst_border_nonexistent(self, method, ts_str, freq, unit):
         result = getattr(ts, method)(freq, nonexistent="shift_forward")
         expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago")
         assert result == expected
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

         result = getattr(ts, method)(freq, nonexistent="NaT")
         assert result is NaT
@@ -363,7 +363,7 @@ def test_replace_non_nano(self):
         assert ts.to_pydatetime() == datetime(4869, 12, 28)

         result = ts.replace(year=4900)
-        assert result._reso == ts._reso
+        assert result._creso == ts._creso
         assert result.to_pydatetime() == datetime(4900, 12, 28)

     def test_replace_naive(self):
@@ -490,7 +490,7 @@ def test_replace_dst_border(self, unit):
         result = t.replace(hour=3)
         expected = Timestamp("2013-11-3 03:00:00", tz="America/Chicago")
         assert result == expected
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

     @pytest.mark.parametrize("fold", [0, 1])
     @pytest.mark.parametrize("tz", ["dateutil/Europe/London", "Europe/London"])
@@ -504,7 +504,7 @@ def test_replace_dst_fold(self, fold, tz, unit):
             tz, ambiguous=not fold
         )
         assert result == expected
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

     # --------------------------------------------------------------
     # Timestamp.normalize
@@ -517,7 +517,7 @@ def test_normalize(self, tz_naive_fixture, arg, unit):
         result = ts.normalize()
         expected = Timestamp("2013-11-30", tz=tz)
         assert result == expected
-        assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
+        assert result._creso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value

     def test_normalize_pre_epoch_dates(self):
         # GH: 36294
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 4dd1b32ba65e4..2b5457fc9f7b3 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -698,7 +698,7 @@ def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
         # as of 2022-09-28, the Timestamp constructor has been updated
         # to cast to M8[s] but to_datetime has not
         ts = Timestamp(dt)
-        assert ts._reso == NpyDatetimeUnit.NPY_FR_s.value
+        assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value
         assert ts.asm8 == dt

         msg = "Out of bounds nanosecond timestamp"
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 34266e55d9ea9..0862fe430e430 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -575,7 +575,7 @@ def test_add_dt64_ndarray_non_nano(self, offset_types, unit, request):
         result = dta + off

         exp_unit = unit
-        if isinstance(off, Tick) and off._reso > dta._reso:
+        if isinstance(off, Tick) and off._creso > dta._creso:
             # cast to higher reso like we would with Timedelta scalar
             exp_unit = Timedelta(off)._unit
         expected = expected._as_unit(exp_unit)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index e74ab61d3881d..97dcd0b011b62 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -217,12 +217,12 @@ def __init__(self, index, warn: bool = True) -> None:
         if isinstance(index, ABCIndex):
             # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
             # ndarray[Any, Any]]" has no attribute "_ndarray"
-            self._reso = get_unit_from_dtype(
+            self._creso = get_unit_from_dtype(
                 index._data._ndarray.dtype  # type: ignore[union-attr]
             )
         else:
             # otherwise we have DTA/TDA
-            self._reso = get_unit_from_dtype(index._ndarray.dtype)
+            self._creso = get_unit_from_dtype(index._ndarray.dtype)

         # This moves the values, which are implicitly in UTC, to the
         # the timezone so they are in local time
@@ -277,7 +277,7 @@ def get_freq(self) -> str | None:
             return None

         delta = self.deltas[0]
-        ppd = periods_per_day(self._reso)
+        ppd = periods_per_day(self._creso)
         if delta and _is_multiple(delta, ppd):
             return self._infer_daily_rule()
@@ -316,17 +316,17 @@ def get_freq(self) -> str | None:

     @cache_readonly
     def day_deltas(self) -> list[int]:
-        ppd = periods_per_day(self._reso)
+        ppd = periods_per_day(self._creso)
         return [x / ppd for x in self.deltas]

     @cache_readonly
     def hour_deltas(self) -> list[int]:
-        pph = periods_per_day(self._reso) // 24
+        pph = periods_per_day(self._creso) // 24
         return [x / pph for x in self.deltas]

     @cache_readonly
     def fields(self) -> np.ndarray:  # structured array of fields
-        return build_field_sarray(self.i8values, reso=self._reso)
+        return build_field_sarray(self.i8values, reso=self._creso)

     @cache_readonly
     def rep_stamp(self) -> Timestamp:
@@ -377,7 +377,7 @@ def _infer_daily_rule(self) -> str | None:
         return None

     def _get_daily_rule(self) -> str | None:
-        ppd = periods_per_day(self._reso)
+        ppd = periods_per_day(self._creso)
         days = self.deltas[0] / ppd
         if days % 7 == 0:
             # Weekly
@@ -433,7 +433,7 @@ def _is_business_daily(self) -> bool:
         # probably business daily, but need to confirm
         first_weekday = self.index[0].weekday()
         shifts = np.diff(self.index.asi8)
-        ppd = periods_per_day(self._reso)
+        ppd = periods_per_day(self._creso)
         shifts = np.floor_divide(shifts, ppd)
         weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)