diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py index b4d78c5d8f528..fff10cf10a4d3 100644 --- a/asv_bench/benchmarks/io/stata.py +++ b/asv_bench/benchmarks/io/stata.py @@ -38,7 +38,7 @@ def time_write_stata(self, convert_dates): class StataMissing(Stata): def setup(self, convert_dates): - super(StataMissing, self).setup(convert_dates) + super().setup(convert_dates) for i in range(10): missing_data = np.random.randn(self.N) missing_data[missing_data < 0] = np.nan diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index ddd5d0d224264..527d2da8e3182 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -528,7 +528,7 @@ class TimeRE(dict): self.locale_time = locale_time else: self.locale_time = LocaleTime() - base = super(TimeRE, self) + base = super() base.__init__({ # The " \d" part of the regex is to make %c from ANSI C work 'd': r"(?P3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])", diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 037c49fd9e683..3b63cbf1cfabb 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -144,7 +144,7 @@ def find_class(self, module, name): # override superclass key = (module, name) module, name = _class_locations_map.get(key, key) - return super(Unpickler, self).find_class(module, name) + return super().find_class(module, name) Unpickler.dispatch = copy.copy(Unpickler.dispatch) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index cbd27d0f8e4f0..47b92036c156b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1159,7 +1159,7 @@ class SelectNFrame(SelectN): """ def __init__(self, obj, n, keep, columns): - super(SelectNFrame, self).__init__(obj, n, keep) + super().__init__(obj, n, keep) if not is_list_like(columns) or isinstance(columns, tuple): columns = [columns] columns = list(columns) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 
a7aa9deaf99fe..7dc054c824fec 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -317,7 +317,7 @@ class FrameRowApply(FrameApply): axis = 0 def apply_broadcast(self): - return super(FrameRowApply, self).apply_broadcast(self.obj) + return super().apply_broadcast(self.obj) @property def series_generator(self): @@ -356,7 +356,7 @@ class FrameColumnApply(FrameApply): axis = 1 def apply_broadcast(self): - result = super(FrameColumnApply, self).apply_broadcast(self.obj.T) + result = super().apply_broadcast(self.obj.T) return result.T @property diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 92dc7590665f1..9c2aa03102533 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1549,7 +1549,7 @@ def argsort(self, *args, **kwargs): array([3, 0, 1, 2]) """ # Keep the implementation here just for the docstring. - return super(Categorical, self).argsort(*args, **kwargs) + return super().argsort(*args, **kwargs) def sort_values(self, inplace=False, ascending=True, na_position='last'): """ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 5ee975e60fcd2..3288cef2ff8c8 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1381,9 +1381,7 @@ def _reduce(self, name, axis=0, skipna=True, **kwargs): if op: return op(axis=axis, skipna=skipna, **kwargs) else: - return super(DatetimeLikeArrayMixin, self)._reduce( - name, skipna, **kwargs - ) + return super()._reduce(name, skipna, **kwargs) def min(self, axis=None, skipna=True, *args, **kwargs): """ diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index a87801eeff57d..422880f7d0f7a 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -580,7 +580,7 @@ def __array__(self, dtype=None): # The default for tz-aware is object, to preserve tz info dtype = object - return super(DatetimeArray, self).__array__(dtype=dtype) + 
return super().__array__(dtype=dtype) def __iter__(self): """ @@ -771,7 +771,7 @@ def _add_delta(self, delta): ------- result : DatetimeArray """ - new_values = super(DatetimeArray, self)._add_delta(delta) + new_values = super()._add_delta(delta) return type(self)._from_sequence(new_values, tz=self.tz, freq='infer') # ----------------------------------------------------------------- diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index b0b87d98ce518..e65b36067b89d 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -505,7 +505,7 @@ def astype(self, dtype, copy=True): if is_period_dtype(dtype): return self.asfreq(dtype.freq) - return super(PeriodArray, self).astype(dtype, copy=copy) + return super().astype(dtype, copy=copy) @property def flags(self): @@ -560,7 +560,7 @@ def _add_offset(self, other): # Note: when calling parent class's _add_timedeltalike_scalar, # it will call delta_to_nanoseconds(delta). Because delta here # is an integer, delta_to_nanoseconds will return it unchanged. - result = super(PeriodArray, self)._add_timedeltalike_scalar(other.n) + result = super()._add_timedeltalike_scalar(other.n) return type(self)(result, freq=self.freq) def _add_timedeltalike_scalar(self, other): @@ -584,7 +584,7 @@ def _add_timedeltalike_scalar(self, other): # Note: when calling parent class's _add_timedeltalike_scalar, # it will call delta_to_nanoseconds(delta). Because delta here # is an integer, delta_to_nanoseconds will return it unchanged. 
- ordinals = super(PeriodArray, self)._add_timedeltalike_scalar(other) + ordinals = super()._add_timedeltalike_scalar(other) return ordinals def _add_delta_tdi(self, other): @@ -620,7 +620,7 @@ def _add_delta(self, other): # We cannot add timedelta-like to non-tick PeriodArray _raise_on_incompatible(self, other) - new_ordinals = super(PeriodArray, self)._add_delta(other) + new_ordinals = super()._add_delta(other) return type(self)(new_ordinals, freq=self.freq) def _check_timedeltalike_freq_compat(self, other): diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 7e001b6ab9e73..d01aac9a8750c 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -118,7 +118,7 @@ def __init__( def __hash__(self): # Python3 doesn't inherit __hash__ when a base class overrides # __eq__, so we explicitly do it here. - return super(SparseDtype, self).__hash__() + return super().__hash__() def __eq__(self, other): # We have to override __eq__ to handle NA values in _metadata. diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 6644b8144196b..a4ab6866b1bd1 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -387,7 +387,7 @@ def _add_delta(self, delta): ------- result : TimedeltaArray """ - new_values = super(TimedeltaArray, self)._add_delta(delta) + new_values = super()._add_delta(delta) return type(self)._from_sequence(new_values, freq='infer') def _add_datetime_arraylike(self, other): @@ -427,9 +427,7 @@ def _addsub_offset_array(self, other, op): # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. 
Incompatible classes will raise AttributeError, # which we re-raise as TypeError - return super(TimedeltaArray, self)._addsub_offset_array( - other, op - ) + return super()._addsub_offset_array(other, op) except AttributeError: raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" .format(cls=type(self).__name__)) diff --git a/pandas/core/base.py b/pandas/core/base.py index de2989e851e04..18ced05d00f25 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -107,7 +107,7 @@ def __sizeof__(self): # no memory_usage attribute, so fall back to # object's 'sizeof' - return super(PandasObject, self).__sizeof__() + return super().__sizeof__() class NoNewAttributesMixin: diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index 0c2342082e9ef..c75552d15441d 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -98,10 +98,10 @@ class NumExprEngine(AbstractEngine): has_neg_frac = True def __init__(self, expr): - super(NumExprEngine, self).__init__(expr) + super().__init__(expr) def convert(self): - return str(super(NumExprEngine, self).convert()) + return str(super().convert()) def _evaluate(self): import numexpr as ne @@ -133,7 +133,7 @@ class PythonEngine(AbstractEngine): has_neg_frac = False def __init__(self, expr): - super(PythonEngine, self).__init__(expr) + super().__init__(expr) def evaluate(self): return self.expr() diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 245cd9c403080..b697ada21f6b9 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -694,15 +694,14 @@ def __init__(self, env, engine, parser, preparser=partial(_preparse, f=_compose( _replace_locals, _replace_booleans, _clean_spaces_backtick_quoted_names))): - super(PandasExprVisitor, self).__init__(env, engine, parser, preparser) + super().__init__(env, engine, parser, preparser) @disallow(_unsupported_nodes | _python_not_supported | 
frozenset(['Not'])) class PythonExprVisitor(BaseExprVisitor): def __init__(self, env, engine, parser, preparser=lambda x: x): - super(PythonExprVisitor, self).__init__(env, engine, parser, - preparser=preparser) + super().__init__(env, engine, parser, preparser=preparser) class Expr(StringMixin): diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 9061fa0308830..c604bf45a6598 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -43,7 +43,7 @@ def __init__(self, name, is_local): msg = 'local variable {0!r} is not defined' else: msg = 'name {0!r} is not defined' - super(UndefinedVariableError, self).__init__(msg.format(name)) + super().__init__(msg.format(name)) class Term(StringMixin): @@ -161,8 +161,7 @@ def ndim(self): class Constant(Term): def __init__(self, value, env, side=None, encoding=None): - super(Constant, self).__init__(value, env, side=side, - encoding=encoding) + super().__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name @@ -329,7 +328,7 @@ class BinOp(Op): """ def __init__(self, op, lhs, rhs, **kwargs): - super(BinOp, self).__init__(op, (lhs, rhs)) + super().__init__(op, (lhs, rhs)) self.lhs = lhs self.rhs = rhs @@ -462,7 +461,7 @@ class Div(BinOp): """ def __init__(self, lhs, rhs, truediv, *args, **kwargs): - super(Div, self).__init__('/', lhs, rhs, *args, **kwargs) + super().__init__('/', lhs, rhs, *args, **kwargs) if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): raise TypeError("unsupported operand type(s) for {0}:" @@ -498,7 +497,7 @@ class UnaryOp(Op): """ def __init__(self, op, operand): - super(UnaryOp, self).__init__(op, (operand,)) + super().__init__(op, (operand,)) self.operand = operand try: @@ -528,7 +527,7 @@ def return_type(self): class MathCall(Op): def __init__(self, func, args): - super(MathCall, self).__init__(func.name, args) + super().__init__(func.name, args) self.func = func def __call__(self, env): diff 
--git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index c957c8c85e3ad..0a08123264fd7 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -26,8 +26,9 @@ class Scope(expr.Scope): def __init__(self, level, global_dict=None, local_dict=None, queryables=None): - super(Scope, self).__init__(level + 1, global_dict=global_dict, - local_dict=local_dict) + super().__init__(level + 1, + global_dict=global_dict, + local_dict=local_dict) self.queryables = queryables or dict() @@ -39,7 +40,7 @@ def __new__(cls, name, env, side=None, encoding=None): return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): - super(Term, self).__init__(name, env, side=side, encoding=encoding) + super().__init__(name, env, side=side, encoding=encoding) def _resolve_name(self): # must be a queryables @@ -63,8 +64,7 @@ def value(self): class Constant(Term): def __init__(self, value, env, side=None, encoding=None): - super(Constant, self).__init__(value, env, side=side, - encoding=encoding) + super().__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name @@ -75,7 +75,7 @@ class BinOp(ops.BinOp): _max_selectors = 31 def __init__(self, op, lhs, rhs, queryables, encoding): - super(BinOp, self).__init__(op, lhs, rhs) + super().__init__(op, lhs, rhs) self.queryables = queryables self.encoding = encoding self.filter = None @@ -385,7 +385,7 @@ class ExprVisitor(BaseExprVisitor): term_type = Term def __init__(self, env, engine, parser, **kwargs): - super(ExprVisitor, self).__init__(env, engine, parser) + super().__init__(env, engine, parser) for bin_op in self.binary_ops: bin_node = self.binary_op_nodes_map[bin_op] setattr(self, 'visit_{node}'.format(node=bin_node), diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2008c444fad5e..e539391ba011e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2630,7 +2630,7 @@ def transpose(self, *args, **kwargs): 
dtype: object """ nv.validate_transpose(args, dict()) - return super(DataFrame, self).transpose(1, 0, **kwargs) + return super().transpose(1, 0, **kwargs) T = property(transpose) @@ -3761,12 +3761,10 @@ def _reindex_multi(self, axes, copy, fill_value): def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): - return super(DataFrame, self).align(other, join=join, axis=axis, - level=level, copy=copy, - fill_value=fill_value, - method=method, limit=limit, - fill_axis=fill_axis, - broadcast_axis=broadcast_axis) + return super().align(other, join=join, axis=axis, level=level, + copy=copy, fill_value=fill_value, method=method, + limit=limit, fill_axis=fill_axis, + broadcast_axis=broadcast_axis) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) @@ -3783,15 +3781,14 @@ def reindex(self, *args, **kwargs): # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('labels', None) - return super(DataFrame, self).reindex(**kwargs) + return super().reindex(**kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): - return super(DataFrame, - self).reindex_axis(labels=labels, axis=axis, - method=method, level=level, copy=copy, - limit=limit, fill_value=fill_value) + return super().reindex_axis(labels=labels, axis=axis, method=method, + level=level, copy=copy, limit=limit, + fill_value=fill_value) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): @@ -3917,10 +3914,9 @@ def drop(self, labels=None, axis=0, index=None, columns=None, falcon speed 320.0 250.0 weight 1.0 0.8 """ - return super(DataFrame, self).drop(labels=labels, axis=axis, - index=index, columns=columns, - level=level, inplace=inplace, - errors=errors) + return super().drop(labels=labels, 
axis=axis, index=index, + columns=columns, level=level, inplace=inplace, + errors=errors) @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False), @@ -4029,29 +4025,27 @@ def rename(self, *args, **kwargs): # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) - return super(DataFrame, self).rename(**kwargs) + return super().rename(**kwargs) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.fillna.__doc__) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): - return super(DataFrame, - self).fillna(value=value, method=method, axis=axis, - inplace=inplace, limit=limit, - downcast=downcast, **kwargs) + return super().fillna(value=value, method=method, axis=axis, + inplace=inplace, limit=limit, downcast=downcast, + **kwargs) @Appender(_shared_docs['replace'] % _shared_doc_kwargs) def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad'): - return super(DataFrame, self).replace(to_replace=to_replace, - value=value, inplace=inplace, - limit=limit, regex=regex, - method=method) + return super().replace(to_replace=to_replace, value=value, + inplace=inplace, limit=limit, regex=regex, + method=method) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None): - return super(DataFrame, self).shift(periods=periods, freq=freq, - axis=axis, fill_value=fill_value) + return super().shift(periods=periods, freq=freq, axis=axis, + fill_value=fill_value) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): @@ -4479,19 +4473,19 @@ def _maybe_casted_values(index, labels=None): @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isna(self): - return super(DataFrame, self).isna() + return super().isna() @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): - return super(DataFrame, 
self).isnull() + return super().isnull() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notna(self): - return super(DataFrame, self).notna() + return super().notna() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): - return super(DataFrame, self).notnull() + return super().notnull() def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): @@ -6334,7 +6328,7 @@ def _aggregate(self, arg, axis=0, *args, **kwargs): ._aggregate(arg, *args, **kwargs)) result = result.T if result is not None else result return result, how - return super(DataFrame, self)._aggregate(arg, *args, **kwargs) + return super()._aggregate(arg, *args, **kwargs) agg = aggregate @@ -6343,7 +6337,7 @@ def transform(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) if axis == 1: return super(DataFrame, self.T).transform(func, *args, **kwargs).T - return super(DataFrame, self).transform(func, *args, **kwargs) + return super().transform(func, *args, **kwargs) def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bf8c6acae0445..0f92ea800c3e7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5144,7 +5144,7 @@ def _dir_additions(self): """ additions = {c for c in self._info_axis.unique(level=0)[:100] if isinstance(c, str) and c.isidentifier()} - return super(NDFrame, self)._dir_additions().union(additions) + return super()._dir_additions().union(additions) # ---------------------------------------------------------------------- # Getting and setting elements diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 01784513704b4..f8b9ddce6000e 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -722,7 +722,7 @@ def _selection_name(self): .format(input='series', examples=_apply_docs['series_examples'])) def apply(self, func, *args, 
**kwargs): - return super(SeriesGroupBy, self).apply(func, *args, **kwargs) + return super().apply(func, *args, **kwargs) @Substitution(see_also=_agg_see_also_doc, examples=_agg_examples_doc, @@ -1290,7 +1290,7 @@ class DataFrameGroupBy(NDFrameGroupBy): axis='') @Appender(_shared_docs['aggregate']) def aggregate(self, arg, *args, **kwargs): - return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs) + return super().aggregate(arg, *args, **kwargs) agg = aggregate @@ -1471,7 +1471,7 @@ def _apply_to_column_groupbys(self, func): def _fill(self, direction, limit=None): """Overridden method to join grouped columns in output""" - res = super(DataFrameGroupBy, self)._fill(direction, limit=limit) + res = super()._fill(direction, limit=limit) output = OrderedDict( (grp.name, grp.grouper) for grp in self.grouper.groupings) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ade8474b251d1..6bb8f299e811f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1995,7 +1995,7 @@ def dropna(self, how='any'): def unique(self, level=None): if level is not None: self._validate_index_level(level) - result = super(Index, self).unique() + result = super().unique() return self._shallow_copy(result) def drop_duplicates(self, keep='first'): @@ -2044,7 +2044,7 @@ def drop_duplicates(self, keep='first'): >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ - return super(Index, self).drop_duplicates(keep=keep) + return super().drop_duplicates(keep=keep) def duplicated(self, keep='first'): """ @@ -2100,7 +2100,7 @@ def duplicated(self, keep='first'): >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ - return super(Index, self).duplicated(keep=keep) + return super().duplicated(keep=keep) def get_duplicates(self): """ @@ -3699,7 +3699,7 @@ def get_values(self): @Appender(IndexOpsMixin.memory_usage.__doc__) def memory_usage(self, deep=False): - result = super(Index, 
self).memory_usage(deep=deep) + result = super().memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) @@ -4507,8 +4507,7 @@ def map(self, mapper, na_action=None): """ from .multi import MultiIndex - new_values = super(Index, self)._map_values( - mapper, na_action=na_action) + new_values = super()._map_values(mapper, na_action=na_action) attributes = self._get_attributes_dict() diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 930b2a4a5161f..473686a7541a5 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -250,8 +250,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): def _shallow_copy(self, values=None, dtype=None, **kwargs): if dtype is None: dtype = self.dtype - return super(CategoricalIndex, self)._shallow_copy( - values=values, dtype=dtype, **kwargs) + return super()._shallow_copy(values=values, dtype=dtype, **kwargs) def _is_dtype_compat(self, other): """ @@ -397,7 +396,7 @@ def astype(self, dtype, copy=True): if dtype == self.dtype: return self.copy() if copy else self - return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy) + return super().astype(dtype=dtype, copy=copy) @cache_readonly def _isnan(self): @@ -503,7 +502,7 @@ def get_value(self, series, key): pass # we might be a positional inexer - return super(CategoricalIndex, self).get_value(series, key) + return super().get_value(series, key) def _can_reindex(self, indexer): """ always allow reindexing """ @@ -666,8 +665,7 @@ def _convert_scalar_indexer(self, key, kind=None): if self.categories._defer_to_indexing: return self.categories._convert_scalar_indexer(key, kind=kind) - return super(CategoricalIndex, self)._convert_scalar_indexer( - key, kind=kind) + return super()._convert_scalar_indexer(key, kind=kind) @Appender(_index_shared_docs['_convert_list_indexer']) def _convert_list_indexer(self, keyarr, kind=None): diff --git a/pandas/core/indexes/datetimelike.py 
b/pandas/core/indexes/datetimelike.py index 197fbc2c22927..6fb26d3e88cae 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -441,7 +441,7 @@ def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value). """ - attrs = super(DatetimeIndexOpsMixin, self)._format_attrs() + attrs = super()._format_attrs() for attrib in self._attributes: if attrib == 'freq': freq = self.freqstr @@ -475,8 +475,7 @@ def _convert_scalar_indexer(self, key, kind=None): elif kind in ['ix', 'getitem'] and is_flt: self._invalid_indexer('index', key) - return (super(DatetimeIndexOpsMixin, self) - ._convert_scalar_indexer(key, kind=kind)) + return super()._convert_scalar_indexer(key, kind=kind) @classmethod def _add_datetimelike_methods(cls): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 011c5e3b1efc2..151d66223ce1c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -383,7 +383,7 @@ def __reduce__(self): def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): - super(DatetimeIndex, self).__setstate__(state) + super().__setstate__(state) elif isinstance(state, tuple): @@ -482,7 +482,7 @@ def union(self, other, sort=None): self._assert_can_do_setop(other) if len(other) == 0 or self.equals(other) or len(self) == 0: - return super(DatetimeIndex, self).union(other, sort=sort) + return super().union(other, sort=sort) if not isinstance(other, DatetimeIndex): try: diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index cad094e59b022..2132147130e6d 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -47,7 +47,7 @@ def union(self, other): """ if isinstance(other, tuple): other = list(other) - return type(self)(super(FrozenList, self).__add__(other)) + return type(self)(super().__add__(other)) def difference(self, other): """ @@ -72,13 +72,13 @@ def 
difference(self, other): # Python 2 compat def __getslice__(self, i, j): - return self.__class__(super(FrozenList, self).__getslice__(i, j)) + return self.__class__(super().__getslice__(i, j)) def __getitem__(self, n): # Python 3 compat if isinstance(n, slice): - return self.__class__(super(FrozenList, self).__getitem__(n)) - return super(FrozenList, self).__getitem__(n) + return self.__class__(super().__getitem__(n)) + return super().__getitem__(n) def __radd__(self, other): if isinstance(other, tuple): @@ -88,12 +88,12 @@ def __radd__(self, other): def __eq__(self, other): if isinstance(other, (tuple, FrozenList)): other = list(other) - return super(FrozenList, self).__eq__(other) + return super().__eq__(other) __req__ = __eq__ def __mul__(self, other): - return self.__class__(super(FrozenList, self).__mul__(other)) + return self.__class__(super().__mul__(other)) __imul__ = __mul__ @@ -181,8 +181,7 @@ def searchsorted(self, value, side="left", sorter=None): except ValueError: pass - return super(FrozenNDArray, self).searchsorted( - value, side=side, sorter=sorter) + return super().searchsorted(value, side=side, sorter=sorter) def _ensure_frozen(array_like, categories, copy=False): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 0155a076eb017..ae63574533745 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -406,7 +406,7 @@ def astype(self, dtype, copy=True): new_values = self.values.astype(dtype, copy=copy) if is_interval_dtype(new_values): return self._shallow_copy(new_values.left, new_values.right) - return super(IntervalIndex, self).astype(dtype, copy=copy) + return super().astype(dtype, copy=copy) @cache_readonly def dtype(self): @@ -527,8 +527,7 @@ def is_overlapping(self): @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): if kind == 'iloc': - return super(IntervalIndex, self)._convert_scalar_indexer( - key, kind=kind) + return 
super()._convert_scalar_indexer(key, kind=kind) return key def _maybe_cast_slice_bound(self, label, side, kind): @@ -912,7 +911,7 @@ def _get_reindexer(self, target): @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): target = self._maybe_cast_indexed(ensure_index(target)) - return super(IntervalIndex, self).get_indexer_non_unique(target) + return super().get_indexer_non_unique(target) @Appender(_index_shared_docs['where']) def where(self, cond, other=None): @@ -987,7 +986,7 @@ def _concat_same_dtype(self, to_concat, name): msg = ('can only append two IntervalIndex objects ' 'that are closed on the same side') raise ValueError(msg) - return super(IntervalIndex, self)._concat_same_dtype(to_concat, name) + return super()._concat_same_dtype(to_concat, name) @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 34413f441a5d6..171a535888aab 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1421,7 +1421,7 @@ def get_level_values(self, level): def unique(self, level=None): if level is None: - return super(MultiIndex, self).unique() + return super().unique() else: level = self._get_level_number(level) return self._get_level_values(level=level, unique=True) @@ -2121,8 +2121,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None): indexer is an ndarray or None if cannot convert keyarr are tuple-safe keys """ - indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer( - keyarr, kind=kind) + indexer, keyarr = super()._convert_listlike_indexer(keyarr, kind=kind) # are we indexing a specific level if indexer is None and len(keyarr) and not isinstance(keyarr[0], @@ -2181,7 +2180,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def 
get_indexer_non_unique(self, target): - return super(MultiIndex, self).get_indexer_non_unique(target) + return super().get_indexer_non_unique(target) def reindex(self, target, method=None, level=None, limit=None, tolerance=None): @@ -2306,7 +2305,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. - return super(MultiIndex, self).slice_locs(start, end, step, kind=kind) + return super().slice_locs(start, end, step, kind=kind) def _partial_tup_index(self, tup, side='left'): if len(tup) > self.lexsort_depth: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 5d6332f301c96..a11f34cbdcceb 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -68,8 +68,7 @@ def _shallow_copy(self, values=None, **kwargs): if values is not None and not self._can_hold_na: # Ensure we are not returning an Int64Index with float data: return self._shallow_copy_with_infer(values=values, **kwargs) - return (super(NumericIndex, self)._shallow_copy(values=values, - **kwargs)) + return super()._shallow_copy(values=values, **kwargs) def _convert_for_op(self, value): """ Convert value to be insertable to ndarray """ @@ -121,7 +120,7 @@ def insert(self, loc, item): # treat NA values as nans: if is_scalar(item) and isna(item): item = self._na_value - return super(NumericIndex, self).insert(loc, item) + return super().insert(loc, item) _num_index_shared_docs['class_descr'] = """ @@ -206,8 +205,7 @@ def _convert_scalar_indexer(self, key, kind=None): # don't coerce ilocs to integers if kind != 'iloc': key = self._maybe_cast_indexer(key) - return (super(Int64Index, self) - ._convert_scalar_indexer(key, kind=kind)) + return super()._convert_scalar_indexer(key, kind=kind) def _wrap_joined_index(self, joined, other): name = get_op_result_name(self, other) @@ -260,8 +258,7 @@ def 
_convert_scalar_indexer(self, key, kind=None): # don't coerce ilocs to integers if kind != 'iloc': key = self._maybe_cast_indexer(key) - return (super(UInt64Index, self) - ._convert_scalar_indexer(key, kind=kind)) + return super()._convert_scalar_indexer(key, kind=kind) @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): @@ -332,7 +329,7 @@ def astype(self, dtype, copy=True): # TODO(jreback); this can change once we have an EA Index type # GH 13149 raise ValueError('Cannot convert NA to integer') - return super(Float64Index, self).astype(dtype, copy=copy) + return super().astype(dtype, copy=copy) @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): @@ -350,8 +347,7 @@ def _convert_slice_indexer(self, key, kind=None): return key if kind == 'iloc': - return super(Float64Index, self)._convert_slice_indexer(key, - kind=kind) + return super()._convert_slice_indexer(key, kind=kind) # translate to locations return self.slice_indexer(key.start, key.stop, key.step, kind=kind) @@ -400,7 +396,7 @@ def equals(self, other): return False def __contains__(self, other): - if super(Float64Index, self).__contains__(other): + if super().__contains__(other): return True try: @@ -431,12 +427,11 @@ def get_loc(self, key, method=None, tolerance=None): return nan_idxs except (TypeError, NotImplementedError): pass - return super(Float64Index, self).get_loc(key, method=method, - tolerance=tolerance) + return super().get_loc(key, method=method, tolerance=tolerance) @cache_readonly def is_unique(self): - return super(Float64Index, self).is_unique and self._nan_idxs.size < 2 + return super().is_unique and self._nan_idxs.size < 2 @Appender(Index.isin.__doc__) def isin(self, values, level=None): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index e2c222f11d85d..420195b514607 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -524,7 +524,7 @@ 
def astype(self, dtype, copy=True, how='start'): return self.to_timestamp(how=how).tz_localize(tz) # TODO: should probably raise on `how` here, so we don't ignore it. - return super(PeriodIndex, self).astype(dtype, copy=copy) + return super().astype(dtype, copy=copy) @Substitution(klass='PeriodIndex') @Appender(_shared_docs['searchsorted']) @@ -576,7 +576,7 @@ def get_value(self, series, key): s = com.values_from_object(series) try: return com.maybe_box(self, - super(PeriodIndex, self).get_value(s, key), + super().get_value(s, key), series, key) except (KeyError, IndexError): try: @@ -634,7 +634,7 @@ def _get_unique_index(self, dropna=False): """ wrap Index._get_unique_index to handle NaT """ - res = super(PeriodIndex, self)._get_unique_index(dropna=dropna) + res = super()._get_unique_index(dropna=dropna) if dropna: res = res.dropna() return res @@ -801,7 +801,7 @@ def join(self, other, how='left', level=None, return_indexers=False, return self._apply_meta(result) def _assert_can_do_setop(self, other): - super(PeriodIndex, self)._assert_can_do_setop(other) + super()._assert_can_do_setop(other) if not isinstance(other, PeriodIndex): raise ValueError('can only call with other PeriodIndex-ed objects') @@ -828,7 +828,7 @@ def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): - super(PeriodIndex, self).__setstate__(state) + super().__setstate__(state) elif isinstance(state, tuple): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index cd31fdeca03ab..9b0c611651b94 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -366,7 +366,7 @@ def equals(self, other): self._start == other._start and self._step == other._step) - return super(RangeIndex, self).equals(other) + return super().equals(other) def intersection(self, other, sort=False): """ @@ -395,7 +395,7 @@ def intersection(self, other, sort=False): return self._get_reconciled_name_object(other) if not 
isinstance(other, RangeIndex): - return super(RangeIndex, self).intersection(other, sort=sort) + return super().intersection(other, sort=sort) if not len(self) or not len(other): return RangeIndex._simple_new(None) @@ -485,7 +485,7 @@ def union(self, other, sort=None): """ self._assert_can_do_setop(other) if len(other) == 0 or self.equals(other) or len(self) == 0: - return super(RangeIndex, self).union(other, sort=sort) + return super().union(other, sort=sort) if isinstance(other, RangeIndex) and sort is None: start_s, step_s = self._start, self._step @@ -534,8 +534,7 @@ def join(self, other, how='left', level=None, return_indexers=False, return self._int64index.join(other, how, level, return_indexers, sort) - return super(RangeIndex, self).join(other, how, level, return_indexers, - sort) + return super().join(other, how, level, return_indexers, sort) def _concat_same_dtype(self, indexes, name): return _concat._concat_rangeindex_same_dtype(indexes).rename(name) @@ -554,7 +553,7 @@ def __getitem__(self, key): """ Conserve RangeIndex type for scalar and slice keys. 
""" - super_getitem = super(RangeIndex, self).__getitem__ + super_getitem = super().__getitem__ if is_scalar(key): if not lib.is_integer(key): diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 976130635c990..148cf5ad44f0c 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -258,7 +258,7 @@ def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): - super(TimedeltaIndex, self).__setstate__(state) + super().__setstate__(state) else: raise Exception("invalid pickle state") _unpickle_compat = __setstate__ @@ -346,7 +346,7 @@ def union(self, other): self._assert_can_do_setop(other) if len(other) == 0 or self.equals(other) or len(self) == 0: - return super(TimedeltaIndex, self).union(other) + return super().union(other) if not isinstance(other, TimedeltaIndex): try: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 68d4e746f72ad..c00d23714beea 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1414,7 +1414,7 @@ class _IXIndexer(_NDFrameIndexer): def __init__(self, name, obj): warnings.warn(self._ix_deprecation_warning, DeprecationWarning, stacklevel=2) - super(_IXIndexer, self).__init__(name, obj) + super().__init__(name, obj) @Appender(_NDFrameIndexer._validate_key.__doc__) def _validate_key(self, key, axis): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 662fe6e3ecb37..06440ff374c68 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1544,8 +1544,7 @@ def __init__(self, values, placement, ndim=None): ndim = 1 else: ndim = 2 - super(NonConsolidatableMixIn, self).__init__(values, placement, - ndim=ndim) + super().__init__(values, placement, ndim=ndim) @property def shape(self): @@ -1657,7 +1656,7 @@ class ExtensionBlock(NonConsolidatableMixIn, Block): def 
__init__(self, values, placement, ndim=None): values = self._maybe_coerce_values(values) - super(ExtensionBlock, self).__init__(values, placement, ndim) + super().__init__(values, placement, ndim) def _maybe_coerce_values(self, values): """Unbox to an extension array. @@ -2066,8 +2065,7 @@ class DatetimeBlock(DatetimeLikeBlockMixin, Block): def __init__(self, values, placement, ndim=None): values = self._maybe_coerce_values(values) - super(DatetimeBlock, self).__init__(values, - placement=placement, ndim=ndim) + super().__init__(values, placement=placement, ndim=ndim) def _maybe_coerce_values(self, values): """Input validation for values passed to __init__. Ensure that @@ -2109,7 +2107,7 @@ def _astype(self, dtype, **kwargs): return self.make_block(values) # delegate - return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) + return super()._astype(dtype=dtype, **kwargs) def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) @@ -2411,16 +2409,13 @@ def concat_same_type(self, to_concat, placement=None): if self.ndim > 1: values = np.atleast_2d(values) return ObjectBlock(values, ndim=self.ndim, placement=placement) - return super(DatetimeTZBlock, self).concat_same_type(to_concat, - placement) + return super().concat_same_type(to_concat, placement) def fillna(self, value, limit=None, inplace=False, downcast=None): # We support filling a DatetimeTZ with a `value` whose timezone # is different by coercing to object. try: - return super(DatetimeTZBlock, self).fillna( - value, limit, inplace, downcast - ) + return super().fillna(value, limit, inplace, downcast) except (ValueError, TypeError): # different timezones, or a non-tz return self.astype(object).fillna( @@ -2432,7 +2427,7 @@ def setitem(self, indexer, value): # Need a dedicated setitem until #24020 (type promotion in setitem # for extension arrays) is designed and implemented. 
try: - return super(DatetimeTZBlock, self).setitem(indexer, value) + return super().setitem(indexer, value) except (ValueError, TypeError): newb = make_block(self.values.astype(object), placement=self.mgr_locs, @@ -2458,8 +2453,7 @@ def __init__(self, values, placement, ndim=None): if isinstance(values, TimedeltaArray): values = values._data assert isinstance(values, np.ndarray), type(values) - super(TimeDeltaBlock, self).__init__(values, - placement=placement, ndim=ndim) + super().__init__(values, placement=placement, ndim=ndim) @property def _holder(self): @@ -2488,7 +2482,7 @@ def fillna(self, value, **kwargs): "instead.", FutureWarning, stacklevel=6) value = Timedelta(value, unit='s') - return super(TimeDeltaBlock, self).fillna(value, **kwargs) + return super().fillna(value, **kwargs) def _try_coerce_args(self, values, other): """ @@ -2587,9 +2581,8 @@ def replace(self, to_replace, value, inplace=False, filter=None, to_replace_values = np.atleast_1d(to_replace) if not np.can_cast(to_replace_values, bool): return self - return super(BoolBlock, self).replace(to_replace, value, - inplace=inplace, filter=filter, - regex=regex, convert=convert) + return super().replace(to_replace, value, inplace=inplace, + filter=filter, regex=regex, convert=convert) class ObjectBlock(Block): @@ -2601,8 +2594,7 @@ def __init__(self, values, placement=None, ndim=2): if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) - super(ObjectBlock, self).__init__(values, ndim=ndim, - placement=placement) + super().__init__(values, ndim=ndim, placement=placement) @property def is_bool(self): @@ -2731,10 +2723,8 @@ def replace(self, to_replace, value, inplace=False, filter=None, filter=filter, regex=True, convert=convert) elif not (either_list or regex): - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - convert=convert) + return super().replace(to_replace, value, inplace=inplace, + filter=filter, regex=regex, 
convert=convert) elif both_lists: for to_rep, v in zip(to_replace, value): result_blocks = [] @@ -2819,9 +2809,8 @@ def _replace_single(self, to_replace, value, inplace=False, filter=None, else: # if the thing to replace is not a string or compiled regex call # the superclass method -> to_replace is some kind of object - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex) + return super().replace(to_replace, value, inplace=inplace, + filter=filter, regex=regex) new_values = self.values if inplace else self.values.copy() @@ -2887,7 +2876,7 @@ def _replace_coerce(self, to_replace, value, inplace=True, regex=False, A new block if there is anything to replace or the original block. """ if mask.any(): - block = super(ObjectBlock, self)._replace_coerce( + block = super()._replace_coerce( to_replace=to_replace, value=value, inplace=inplace, regex=regex, convert=convert, mask=mask) if convert: @@ -2908,9 +2897,9 @@ def __init__(self, values, placement, ndim=None): from pandas.core.arrays.categorical import _maybe_to_categorical # coerce to categorical if we can - super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - placement=placement, - ndim=ndim) + super().__init__(_maybe_to_categorical(values), + placement=placement, + ndim=ndim) @property def _holder(self): @@ -2990,7 +2979,7 @@ def where(self, other, cond, align=True, errors='raise', ) try: # Attempt to do preserve categorical dtype. 
- result = super(CategoricalBlock, self).where( + result = super().where( other, cond, align, errors, try_cast, axis, transpose ) except (TypeError, ValueError): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 95516aec060b7..ffbb62e64d4e4 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -57,7 +57,7 @@ def set_use_bottleneck(v=True): class disallow: def __init__(self, *dtypes): - super(disallow, self).__init__() + super().__init__() self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes) def check(self, obj): diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 645a3dc31f637..c642d96a4acfd 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -284,7 +284,7 @@ def __getitem__(self, key): if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) if not (is_list_like(key) or isinstance(key, slice)): - return super(Panel, self).__getitem__(key) + return super().__getitem__(key) return self.loc[key] def _getitem_multilevel(self, key): @@ -1238,7 +1238,7 @@ def reindex(self, *args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) # do not warn about constructing Panel when reindexing - result = super(Panel, self).reindex(**kwargs) + result = super().reindex(**kwargs) return result @Substitution(**_shared_doc_kwargs) @@ -1248,16 +1248,15 @@ def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs): kwargs.pop('major', None)) minor_axis = (minor_axis if minor_axis is not None else kwargs.pop('minor', None)) - return super(Panel, self).rename(items=items, major_axis=major_axis, - minor_axis=minor_axis, **kwargs) + return super().rename(items=items, major_axis=major_axis, + minor_axis=minor_axis, **kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): - return super(Panel, self).reindex_axis(labels=labels, 
axis=axis, - method=method, level=level, - copy=copy, limit=limit, - fill_value=fill_value) + return super().reindex_axis(labels=labels, axis=axis, method=method, + level=level, copy=copy, limit=limit, + fill_value=fill_value) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.transpose.__doc__) @@ -1276,15 +1275,15 @@ def transpose(self, *args, **kwargs): elif not axes: axes = kwargs.pop('axes', ()) - return super(Panel, self).transpose(*axes, **kwargs) + return super().transpose(*axes, **kwargs) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.fillna.__doc__) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): - return super(Panel, self).fillna(value=value, method=method, axis=axis, - inplace=inplace, limit=limit, - downcast=downcast, **kwargs) + return super().fillna(value=value, method=method, axis=axis, + inplace=inplace, limit=limit, downcast=downcast, + **kwargs) def count(self, axis='major'): """ @@ -1328,10 +1327,10 @@ def shift(self, periods=1, freq=None, axis='major'): if freq: return self.tshift(periods, freq, axis=axis) - return super(Panel, self).slice_shift(periods, axis=axis) + return super().slice_shift(periods, axis=axis) def tshift(self, periods=1, freq=None, axis='major'): - return super(Panel, self).tshift(periods, freq, axis) + return super().tshift(periods, freq, axis) def join(self, other, how='left', lsuffix='', rsuffix=''): """ @@ -1565,7 +1564,7 @@ def sort_values(self, *args, **kwargs): NOT IMPLEMENTED: do not call this method, as sorting values is not supported for Panel objects and will raise an error. 
""" - super(Panel, self).sort_values(*args, **kwargs) + super().sort_values(*args, **kwargs) Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0, diff --git a/pandas/core/resample.py b/pandas/core/resample.py index f3aaf9eba2711..4edcdd2dc1060 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -112,7 +112,7 @@ def __iter__(self): GroupBy.__iter__ """ self._set_binner() - return super(Resampler, self).__iter__() + return super().__iter__() @property def obj(self): @@ -207,7 +207,7 @@ def _assure_grouper(self): """) @Appender(_pipe_template) def pipe(self, func, *args, **kwargs): - return super(Resampler, self).pipe(func, *args, **kwargs) + return super().pipe(func, *args, **kwargs) _agg_see_also_doc = dedent(""" See Also @@ -938,7 +938,7 @@ def __init__(self, obj, *args, **kwargs): for attr in self._attributes: setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) - super(_GroupByMixin, self).__init__(None) + super().__init__(None) self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True @@ -1069,7 +1069,7 @@ def _upsample(self, method, limit=None, fill_value=None): return self._wrap_result(result) def _wrap_result(self, result): - result = super(DatetimeIndexResampler, self)._wrap_result(result) + result = super()._wrap_result(result) # we may have a different kind that we were asked originally # convert if needed @@ -1097,11 +1097,11 @@ def _resampler_for_grouping(self): def _get_binner_for_time(self): if self.kind == 'timestamp': - return super(PeriodIndexResampler, self)._get_binner_for_time() + return super()._get_binner_for_time() return self.groupby._get_period_bins(self.ax) def _convert_obj(self, obj): - obj = super(PeriodIndexResampler, self)._convert_obj(obj) + obj = super()._convert_obj(obj) if self._from_selection: # see GH 14008, GH 12871 @@ -1133,7 +1133,7 @@ def _downsample(self, how, **kwargs): # we may need to actually resample as if we are timestamps if self.kind == 
'timestamp': - return super(PeriodIndexResampler, self)._downsample(how, **kwargs) + return super()._downsample(how, **kwargs) how = self._is_cython_func(how) or how ax = self.ax @@ -1177,8 +1177,8 @@ def _upsample(self, method, limit=None, fill_value=None): # we may need to actually resample as if we are timestamps if self.kind == 'timestamp': - return super(PeriodIndexResampler, self)._upsample( - method, limit=limit, fill_value=fill_value) + return super()._upsample(method, limit=limit, + fill_value=fill_value) self._set_binner() ax = self.ax @@ -1329,7 +1329,7 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', # always sort time groupers kwargs['sort'] = True - super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs) + super().__init__(freq=freq, axis=axis, **kwargs) def _get_resampler(self, obj, kind=None): """ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index f779c98668eec..0837186e33267 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1386,7 +1386,7 @@ def __init__(self, left, right, on=None, left_on=None, right_on=None, fill_method=fill_method) def _validate_specification(self): - super(_AsOfMerge, self)._validate_specification() + super()._validate_specification() # we only allow on to be a single item for on if len(self.left_on) != 1 and not self.left_index: @@ -1441,7 +1441,7 @@ def _get_merge_keys(self): # note this function has side effects (left_join_keys, right_join_keys, - join_names) = super(_AsOfMerge, self)._get_merge_keys() + join_names) = super()._get_merge_keys() # validate index types are the same for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): diff --git a/pandas/core/series.py b/pandas/core/series.py index 34088e86cf80a..5d8de03f91e10 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1714,7 +1714,7 @@ def unique(self): [b, a, c] Categories (3, object): [a < b < c] """ - result = super(Series, self).unique() + result 
= super().unique() return result def drop_duplicates(self, keep='first', inplace=False): @@ -1789,7 +1789,7 @@ def drop_duplicates(self, keep='first', inplace=False): 5 hippo Name: animal, dtype: object """ - return super(Series, self).drop_duplicates(keep=keep, inplace=inplace) + return super().drop_duplicates(keep=keep, inplace=inplace) def duplicated(self, keep='first'): """ @@ -1865,7 +1865,7 @@ def duplicated(self, keep='first'): 4 True dtype: bool """ - return super(Series, self).duplicated(keep=keep) + return super().duplicated(keep=keep) def idxmin(self, axis=0, skipna=True, *args, **kwargs): """ @@ -3466,7 +3466,7 @@ def map(self, arg, na_action=None): 3 I am a rabbit dtype: object """ - new_values = super(Series, self)._map_values( + new_values = super()._map_values( arg, na_action=na_action) return self._constructor(new_values, index=self.index).__finalize__(self) @@ -3549,7 +3549,7 @@ def aggregate(self, func, axis=0, *args, **kwargs): def transform(self, func, axis=0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) - return super(Series, self).transform(func, *args, **kwargs) + return super().transform(func, *args, **kwargs) def apply(self, func, convert_dtype=True, args=(), **kwds): """ @@ -3745,11 +3745,10 @@ def _needs_reindex_multi(self, axes, method, level): def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): - return super(Series, self).align(other, join=join, axis=axis, - level=level, copy=copy, - fill_value=fill_value, method=method, - limit=limit, fill_axis=fill_axis, - broadcast_axis=broadcast_axis) + return super().align(other, join=join, axis=axis, level=level, + copy=copy, fill_value=fill_value, method=method, + limit=limit, fill_axis=fill_axis, + broadcast_axis=broadcast_axis) def rename(self, index=None, **kwargs): """ @@ -3819,12 +3818,12 @@ def rename(self, index=None, **kwargs): not is_dict_like(index)) if 
non_mapping: return self._set_name(index, inplace=kwargs.get('inplace')) - return super(Series, self).rename(index=index, **kwargs) + return super().rename(index=index, **kwargs) @Substitution(**_shared_doc_kwargs) @Appender(generic.NDFrame.reindex.__doc__) def reindex(self, index=None, **kwargs): - return super(Series, self).reindex(index=index, **kwargs) + return super().reindex(index=index, **kwargs) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): @@ -3914,30 +3913,29 @@ def drop(self, labels=None, axis=0, index=None, columns=None, length 0.3 dtype: float64 """ - return super(Series, self).drop(labels=labels, axis=axis, index=index, - columns=columns, level=level, - inplace=inplace, errors=errors) + return super().drop(labels=labels, axis=axis, index=index, + columns=columns, level=level, inplace=inplace, + errors=errors) @Substitution(**_shared_doc_kwargs) @Appender(generic.NDFrame.fillna.__doc__) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): - return super(Series, self).fillna(value=value, method=method, - axis=axis, inplace=inplace, - limit=limit, downcast=downcast, - **kwargs) + return super().fillna(value=value, method=method, axis=axis, + inplace=inplace, limit=limit, downcast=downcast, + **kwargs) @Appender(generic._shared_docs['replace'] % _shared_doc_kwargs) def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad'): - return super(Series, self).replace(to_replace=to_replace, value=value, - inplace=inplace, limit=limit, - regex=regex, method=method) + return super().replace(to_replace=to_replace, value=value, + inplace=inplace, limit=limit, regex=regex, + method=method) @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None): - return super(Series, self).shift(periods=periods, freq=freq, axis=axis, - fill_value=fill_value) + 
return super().shift(periods=periods, freq=freq, axis=axis, + fill_value=fill_value) def reindex_axis(self, labels, axis=0, **kwargs): """ @@ -4004,7 +4002,7 @@ def memory_usage(self, index=True, deep=False): >>> s.memory_usage(deep=True) 212 """ - v = super(Series, self).memory_usage(deep=deep) + v = super().memory_usage(deep=deep) if index: v += self.index.memory_usage(deep=deep) return v @@ -4296,19 +4294,19 @@ def to_csv(self, *args, **kwargs): @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs) def isna(self): - return super(Series, self).isna() + return super().isna() @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): - return super(Series, self).isnull() + return super().isnull() @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs) def notna(self): - return super(Series, self).notna() + return super().notna() @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): - return super(Series, self).notnull() + return super().notnull() def dropna(self, axis=0, inplace=False, **kwargs): """ diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 31e94b4770b0e..22f5df411a2ad 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -356,7 +356,7 @@ def copy(self, deep=True): """ Make a copy of this SparseDataFrame """ - result = super(SparseDataFrame, self).copy(deep=deep) + result = super().copy(deep=deep) result._default_fill_value = self._default_fill_value result._default_kind = self._default_kind return result @@ -382,10 +382,9 @@ def density(self): def fillna(self, value=None, method=None, axis=0, inplace=False, limit=None, downcast=None): - new_self = super(SparseDataFrame, - self).fillna(value=value, method=method, axis=axis, - inplace=inplace, limit=limit, - downcast=downcast) + new_self = super().fillna(value=value, method=method, axis=axis, + inplace=inplace, limit=limit, + downcast=downcast) if not inplace: self = new_self diff --git 
a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 11231ce90b6b9..fc14b20f0c6fe 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -85,7 +85,7 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block', elif is_scalar(data) and index is not None: data = np.full(len(index), fill_value=data) - super(SparseSeries, self).__init__( + super().__init__( SparseArray(data, sparse_index=sparse_index, kind=kind, @@ -293,7 +293,7 @@ def __getitem__(self, key): if is_integer(key) and key not in self.index: return self._get_val_at(key) else: - return super(SparseSeries, self).__getitem__(key) + return super().__getitem__(key) def _get_values(self, indexer): try: @@ -464,9 +464,8 @@ def copy(self, deep=True): def reindex(self, index=None, method=None, copy=True, limit=None, **kwargs): # TODO: remove? - return super(SparseSeries, self).reindex(index=index, method=method, - copy=copy, limit=limit, - **kwargs) + return super().reindex(index=index, method=method, copy=copy, + limit=limit, **kwargs) def sparse_reindex(self, new_index): """ diff --git a/pandas/core/window.py b/pandas/core/window.py index eb65ca7a92584..3bf63c1c10356 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -585,7 +585,7 @@ class Window(_Window): """ def validate(self): - super(Window, self).validate() + super().validate() window = self.window if isinstance(window, (list, tuple, np.ndarray)): @@ -773,7 +773,7 @@ def __init__(self, obj, *args, **kwargs): self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True - super(GroupByMixin, self).__init__(obj, *args, **kwargs) + super().__init__(obj, *args, **kwargs) count = GroupByMixin._dispatch('count') corr = GroupByMixin._dispatch('corr', other=None, pairwise=None) @@ -1565,7 +1565,7 @@ def _on(self): "or None".format(self.on)) def validate(self): - super(Rolling, self).validate() + super().validate() # we allow rolling on a datetimelike index if 
((self.obj.empty or self.is_datetimelike) and @@ -1678,7 +1678,7 @@ def _validate_freq(self): axis='') @Appender(_shared_docs['aggregate']) def aggregate(self, arg, *args, **kwargs): - return super(Rolling, self).aggregate(arg, *args, **kwargs) + return super().aggregate(arg, *args, **kwargs) agg = aggregate @@ -1690,61 +1690,60 @@ def count(self): if self.is_freq_type: return self._apply('roll_count', 'count') - return super(Rolling, self).count() + return super().count() @Substitution(name='rolling') @Appender(_shared_docs['apply']) def apply(self, func, raw=None, args=(), kwargs={}): - return super(Rolling, self).apply( - func, raw=raw, args=args, kwargs=kwargs) + return super().apply(func, raw=raw, args=args, kwargs=kwargs) @Substitution(name='rolling') @Appender(_shared_docs['sum']) def sum(self, *args, **kwargs): nv.validate_rolling_func('sum', args, kwargs) - return super(Rolling, self).sum(*args, **kwargs) + return super().sum(*args, **kwargs) @Substitution(name='rolling') @Appender(_doc_template) @Appender(_shared_docs['max']) def max(self, *args, **kwargs): nv.validate_rolling_func('max', args, kwargs) - return super(Rolling, self).max(*args, **kwargs) + return super().max(*args, **kwargs) @Substitution(name='rolling') @Appender(_shared_docs['min']) def min(self, *args, **kwargs): nv.validate_rolling_func('min', args, kwargs) - return super(Rolling, self).min(*args, **kwargs) + return super().min(*args, **kwargs) @Substitution(name='rolling') @Appender(_shared_docs['mean']) def mean(self, *args, **kwargs): nv.validate_rolling_func('mean', args, kwargs) - return super(Rolling, self).mean(*args, **kwargs) + return super().mean(*args, **kwargs) @Substitution(name='rolling') @Appender(_shared_docs['median']) def median(self, **kwargs): - return super(Rolling, self).median(**kwargs) + return super().median(**kwargs) @Substitution(name='rolling') @Appender(_shared_docs['std']) def std(self, ddof=1, *args, **kwargs): nv.validate_rolling_func('std', args, kwargs) 
- return super(Rolling, self).std(ddof=ddof, **kwargs) + return super().std(ddof=ddof, **kwargs) @Substitution(name='rolling') @Appender(_shared_docs['var']) def var(self, ddof=1, *args, **kwargs): nv.validate_rolling_func('var', args, kwargs) - return super(Rolling, self).var(ddof=ddof, **kwargs) + return super().var(ddof=ddof, **kwargs) @Substitution(name='rolling') @Appender(_doc_template) @Appender(_shared_docs['skew']) def skew(self, **kwargs): - return super(Rolling, self).skew(**kwargs) + return super().skew(**kwargs) _agg_doc = dedent(""" Examples @@ -1774,27 +1773,24 @@ def skew(self, **kwargs): @Substitution(name='rolling') @Appender(_shared_docs['kurt']) def kurt(self, **kwargs): - return super(Rolling, self).kurt(**kwargs) + return super().kurt(**kwargs) @Substitution(name='rolling') @Appender(_shared_docs['quantile']) def quantile(self, quantile, interpolation='linear', **kwargs): - return super(Rolling, self).quantile(quantile=quantile, - interpolation=interpolation, - **kwargs) + return super().quantile(quantile=quantile, interpolation=interpolation, + **kwargs) @Substitution(name='rolling') @Appender(_doc_template) @Appender(_shared_docs['cov']) def cov(self, other=None, pairwise=None, ddof=1, **kwargs): - return super(Rolling, self).cov(other=other, pairwise=pairwise, - ddof=ddof, **kwargs) + return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) @Substitution(name='rolling') @Appender(_shared_docs['corr']) def corr(self, other=None, pairwise=None, **kwargs): - return super(Rolling, self).corr(other=other, pairwise=pairwise, - **kwargs) + return super().corr(other=other, pairwise=pairwise, **kwargs) class RollingGroupby(_GroupByMixin, Rolling): @@ -1816,7 +1812,7 @@ def _gotitem(self, key, ndim, subset=None): if self.on is not None: self._groupby.obj = self._groupby.obj.set_index(self._on) self.on = None - return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset) + return super()._gotitem(key, ndim, subset=subset) def 
_validate_monotonic(self): """ @@ -1881,8 +1877,8 @@ class Expanding(_Rolling_and_Expanding): def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): - super(Expanding, self).__init__(obj=obj, min_periods=min_periods, - center=center, axis=axis) + super().__init__(obj=obj, min_periods=min_periods, center=center, + axis=axis) @property def _constructor(self): @@ -1956,68 +1952,68 @@ def _get_window(self, other=None): axis='') @Appender(_shared_docs['aggregate']) def aggregate(self, arg, *args, **kwargs): - return super(Expanding, self).aggregate(arg, *args, **kwargs) + return super().aggregate(arg, *args, **kwargs) agg = aggregate @Substitution(name='expanding') @Appender(_shared_docs['count']) def count(self, **kwargs): - return super(Expanding, self).count(**kwargs) + return super().count(**kwargs) @Substitution(name='expanding') @Appender(_shared_docs['apply']) def apply(self, func, raw=None, args=(), kwargs={}): - return super(Expanding, self).apply( + return super().apply( func, raw=raw, args=args, kwargs=kwargs) @Substitution(name='expanding') @Appender(_shared_docs['sum']) def sum(self, *args, **kwargs): nv.validate_expanding_func('sum', args, kwargs) - return super(Expanding, self).sum(*args, **kwargs) + return super().sum(*args, **kwargs) @Substitution(name='expanding') @Appender(_doc_template) @Appender(_shared_docs['max']) def max(self, *args, **kwargs): nv.validate_expanding_func('max', args, kwargs) - return super(Expanding, self).max(*args, **kwargs) + return super().max(*args, **kwargs) @Substitution(name='expanding') @Appender(_shared_docs['min']) def min(self, *args, **kwargs): nv.validate_expanding_func('min', args, kwargs) - return super(Expanding, self).min(*args, **kwargs) + return super().min(*args, **kwargs) @Substitution(name='expanding') @Appender(_shared_docs['mean']) def mean(self, *args, **kwargs): nv.validate_expanding_func('mean', args, kwargs) - return super(Expanding, self).mean(*args, **kwargs) + return 
super().mean(*args, **kwargs) @Substitution(name='expanding') @Appender(_shared_docs['median']) def median(self, **kwargs): - return super(Expanding, self).median(**kwargs) + return super().median(**kwargs) @Substitution(name='expanding') @Appender(_shared_docs['std']) def std(self, ddof=1, *args, **kwargs): nv.validate_expanding_func('std', args, kwargs) - return super(Expanding, self).std(ddof=ddof, **kwargs) + return super().std(ddof=ddof, **kwargs) @Substitution(name='expanding') @Appender(_shared_docs['var']) def var(self, ddof=1, *args, **kwargs): nv.validate_expanding_func('var', args, kwargs) - return super(Expanding, self).var(ddof=ddof, **kwargs) + return super().var(ddof=ddof, **kwargs) @Substitution(name='expanding') @Appender(_doc_template) @Appender(_shared_docs['skew']) def skew(self, **kwargs): - return super(Expanding, self).skew(**kwargs) + return super().skew(**kwargs) _agg_doc = dedent(""" Examples @@ -2047,27 +2043,25 @@ def skew(self, **kwargs): @Substitution(name='expanding') @Appender(_shared_docs['kurt']) def kurt(self, **kwargs): - return super(Expanding, self).kurt(**kwargs) + return super().kurt(**kwargs) @Substitution(name='expanding') @Appender(_shared_docs['quantile']) def quantile(self, quantile, interpolation='linear', **kwargs): - return super(Expanding, self).quantile(quantile=quantile, - interpolation=interpolation, - **kwargs) + return super().quantile(quantile=quantile, + interpolation=interpolation, + **kwargs) @Substitution(name='expanding') @Appender(_doc_template) @Appender(_shared_docs['cov']) def cov(self, other=None, pairwise=None, ddof=1, **kwargs): - return super(Expanding, self).cov(other=other, pairwise=pairwise, - ddof=ddof, **kwargs) + return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) @Substitution(name='expanding') @Appender(_shared_docs['corr']) def corr(self, other=None, pairwise=None, **kwargs): - return super(Expanding, self).corr(other=other, pairwise=pairwise, - **kwargs) + return 
super().corr(other=other, pairwise=pairwise, **kwargs) class ExpandingGroupby(_GroupByMixin, Expanding): @@ -2267,7 +2261,7 @@ def _constructor(self): axis='') @Appender(_shared_docs['aggregate']) def aggregate(self, arg, *args, **kwargs): - return super(EWM, self).aggregate(arg, *args, **kwargs) + return super().aggregate(arg, *args, **kwargs) agg = aggregate diff --git a/pandas/io/clipboard/exceptions.py b/pandas/io/clipboard/exceptions.py index d948ad414327c..6276b06b9d7fe 100644 --- a/pandas/io/clipboard/exceptions.py +++ b/pandas/io/clipboard/exceptions.py @@ -9,4 +9,4 @@ class PyperclipWindowsException(PyperclipException): def __init__(self, message): message += " ({err})".format(err=ctypes.WinError()) - super(PyperclipWindowsException, self).__init__(message) + super().__init__(message) diff --git a/pandas/io/clipboard/windows.py b/pandas/io/clipboard/windows.py index ecf4598a505e0..72abc72966342 100644 --- a/pandas/io/clipboard/windows.py +++ b/pandas/io/clipboard/windows.py @@ -12,7 +12,7 @@ class CheckedCall: def __init__(self, f): - super(CheckedCall, self).__setattr__("f", f) + super().__setattr__("f", f) def __call__(self, *args): ret = self.f(*args) diff --git a/pandas/io/common.py b/pandas/io/common.py index fb254b1f3c74e..f9cd1806763e2 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -422,10 +422,10 @@ class BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs): if mode in ['wb', 'rb']: mode = mode.replace('b', '') - super(BytesZipFile, self).__init__(file, mode, compression, **kwargs) + super().__init__(file, mode, compression, **kwargs) def write(self, data): - super(BytesZipFile, self).writestr(self.filename, data) + super().writestr(self.filename, data) @property def closed(self): diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 8d79c13a65c97..645124c28b2fd 100644 --- a/pandas/io/excel/_openpyxl.py +++ 
b/pandas/io/excel/_openpyxl.py @@ -10,7 +10,7 @@ def __init__(self, path, engine=None, mode='w', **engine_kwargs): # Use the openpyxl module as the Excel writer. from openpyxl.workbook import Workbook - super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs) + super().__init__(path, mode=mode, **engine_kwargs) if self.mode == 'a': # Load from existing workbook from openpyxl import load_workbook diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index 5504665c6bfb1..2dc736f81f6f8 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -160,11 +160,11 @@ def __init__(self, path, engine=None, if mode == 'a': raise ValueError('Append mode is not supported with xlsxwriter!') - super(_XlsxWriter, self).__init__(path, engine=engine, - date_format=date_format, - datetime_format=datetime_format, - mode=mode, - **engine_kwargs) + super().__init__(path, engine=engine, + date_format=date_format, + datetime_format=datetime_format, + mode=mode, + **engine_kwargs) self.book = xlsxwriter.Workbook(path, **engine_kwargs) diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index 191fbe914b750..62a57b99fe556 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -17,7 +17,7 @@ def __init__(self, path, engine=None, encoding=None, mode='w', if mode == 'a': raise ValueError('Append mode is not supported with xlwt!') - super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs) + super().__init__(path, mode=mode, **engine_kwargs) if encoding is None: encoding = 'ascii' diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 287672d40c9a5..1a709421d49be 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -309,7 +309,7 @@ def adjoin(self, space, *lists, **kwargs): class EastAsianTextAdjustment(TextAdjustment): def __init__(self): - super(EastAsianTextAdjustment, self).__init__() + super().__init__() if 
get_option("display.unicode.ambiguous_as_wide"): self.ambiguous_width = 2 else: @@ -1160,7 +1160,7 @@ def _format_strings(self): class Datetime64Formatter(GenericArrayFormatter): def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs): - super(Datetime64Formatter, self).__init__(values, **kwargs) + super().__init__(values, **kwargs) self.nat_rep = nat_rep self.date_format = date_format @@ -1346,7 +1346,7 @@ def _format_strings(self): class Timedelta64Formatter(GenericArrayFormatter): def __init__(self, values, nat_rep='NaT', box=False, **kwargs): - super(Timedelta64Formatter, self).__init__(values, **kwargs) + super().__init__(values, **kwargs) self.nat_rep = nat_rep self.box = box diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 031891cb2f7cb..1573a720597d2 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -540,6 +540,6 @@ def write_style(self): def render(self): self.write('
') self.write_style() - super(NotebookFormatter, self).render() + super().render() self.write('
') return self.elements diff --git a/pandas/io/html.py b/pandas/io/html.py index 641dfe73e24af..d8d6d175f0c86 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -533,8 +533,7 @@ class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): """ def __init__(self, *args, **kwargs): - super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args, - **kwargs) + super().__init__(*args, **kwargs) from bs4 import SoupStrainer self._strainer = SoupStrainer('table') @@ -644,7 +643,7 @@ class _LxmlFrameParser(_HtmlFrameParser): """ def __init__(self, *args, **kwargs): - super(_LxmlFrameParser, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def _text_getter(self, obj): return obj.text_content() diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index dbf7f4f49ce86..ee9d9e000d7e3 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -128,10 +128,8 @@ def _write(self, obj, orient, double_precision, ensure_ascii, date_unit, iso_dates, default_handler): if not self.index and orient == 'split': obj = {"name": obj.name, "data": obj.values} - return super(SeriesWriter, self)._write(obj, orient, - double_precision, - ensure_ascii, date_unit, - iso_dates, default_handler) + return super()._write(obj, orient, double_precision, ensure_ascii, + date_unit, iso_dates, default_handler) class FrameWriter(Writer): @@ -155,10 +153,8 @@ def _write(self, obj, orient, double_precision, ensure_ascii, if not self.index and orient == 'split': obj = obj.to_dict(orient='split') del obj["index"] - return super(FrameWriter, self)._write(obj, orient, - double_precision, - ensure_ascii, date_unit, - iso_dates, default_handler) + return super()._write(obj, orient, double_precision, ensure_ascii, + date_unit, iso_dates, default_handler) class JSONTableWriter(FrameWriter): @@ -172,9 +168,9 @@ def __init__(self, obj, orient, date_format, double_precision, to know what the index is, forces orient to records, and forces date_format to 'iso'. 
""" - super(JSONTableWriter, self).__init__( - obj, orient, date_format, double_precision, ensure_ascii, - date_unit, index, default_handler=default_handler) + super().__init__(obj, orient, date_format, double_precision, + ensure_ascii, date_unit, index, + default_handler=default_handler) if date_format != 'iso': msg = ("Trying to write with `orient='table'` and " @@ -216,11 +212,8 @@ def __init__(self, obj, orient, date_format, double_precision, def _write(self, obj, orient, double_precision, ensure_ascii, date_unit, iso_dates, default_handler): - data = super(JSONTableWriter, self)._write(obj, orient, - double_precision, - ensure_ascii, date_unit, - iso_dates, - default_handler) + data = super()._write(obj, orient, double_precision, ensure_ascii, + date_unit, iso_dates, default_handler) serialized = '{{"schema": {schema}, "data": {data}}}'.format( schema=dumps(self.schema), data=data) return serialized diff --git a/pandas/io/packers.py b/pandas/io/packers.py index b67685c37de90..1309bd1fef421 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -751,12 +751,11 @@ def __init__(self, default=encode, use_single_float=False, autoreset=1, use_bin_type=1): - super(Packer, self).__init__(default=default, - encoding=encoding, - unicode_errors=unicode_errors, - use_single_float=use_single_float, - autoreset=autoreset, - use_bin_type=use_bin_type) + super().__init__(default=default, encoding=encoding, + unicode_errors=unicode_errors, + use_single_float=use_single_float, + autoreset=autoreset, + use_bin_type=use_bin_type) class Unpacker(_Unpacker): @@ -765,16 +764,16 @@ def __init__(self, file_like=None, read_size=0, use_list=False, object_hook=decode, object_pairs_hook=None, list_hook=None, encoding='utf-8', unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType): - super(Unpacker, self).__init__(file_like=file_like, - read_size=read_size, - use_list=use_list, - object_hook=object_hook, - object_pairs_hook=object_pairs_hook, - list_hook=list_hook, - 
encoding=encoding, - unicode_errors=unicode_errors, - max_buffer_size=max_buffer_size, - ext_hook=ext_hook) + super().__init__(file_like=file_like, + read_size=read_size, + use_list=use_list, + object_hook=object_hook, + object_pairs_hook=object_pairs_hook, + list_hook=list_hook, + encoding=encoding, + unicode_errors=unicode_errors, + max_buffer_size=max_buffer_size, + ext_hook=ext_hook) class Iterator: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 85cc738667a34..19068eca38775 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1860,8 +1860,8 @@ def create_for_block( def __init__(self, values=None, kind=None, typ=None, cname=None, data=None, meta=None, metadata=None, block=None, **kwargs): - super(DataCol, self).__init__(values=values, kind=kind, typ=typ, - cname=cname, **kwargs) + super().__init__(values=values, kind=kind, typ=typ, cname=cname, + **kwargs) self.dtype = None self.dtype_attr = '{name}_dtype'.format(name=self.name) self.meta = meta @@ -2848,7 +2848,7 @@ def read(self, **kwargs): return Series(values, index=index, name=self.name) def write(self, obj, **kwargs): - super(SeriesFixed, self).write(obj, **kwargs) + super().write(obj, **kwargs) self.write_index('index', obj.index) self.write_array('values', obj.values) self.attrs.name = obj.name @@ -2860,7 +2860,7 @@ def validate_read(self, kwargs): """ we don't support start, stop kwds in Sparse """ - kwargs = super(SparseFixed, self).validate_read(kwargs) + kwargs = super().validate_read(kwargs) if 'start' in kwargs or 'stop' in kwargs: raise NotImplementedError("start and/or stop are not supported " "in fixed Sparse reading") @@ -2882,7 +2882,7 @@ def read(self, **kwargs): name=self.name) def write(self, obj, **kwargs): - super(SparseSeriesFixed, self).write(obj, **kwargs) + super().write(obj, **kwargs) self.write_index('index', obj.index) self.write_index('sp_index', obj.sp_index) self.write_array('sp_values', obj.sp_values) @@ -2910,7 +2910,7 @@ def read(self, 
**kwargs): def write(self, obj, **kwargs): """ write it as a collection of individual sparse series """ - super(SparseFrameFixed, self).write(obj, **kwargs) + super().write(obj, **kwargs) for name, ss in obj.items(): key = 'sparse_series_{name}'.format(name=name) if key not in self.group._v_children: @@ -2987,7 +2987,7 @@ def read(self, start=None, stop=None, **kwargs): return self.obj_type(BlockManager(blocks, axes)) def write(self, obj, **kwargs): - super(BlockManagerFixed, self).write(obj, **kwargs) + super().write(obj, **kwargs) data = obj._data if not data.is_consolidated(): data = data.consolidate() @@ -3047,7 +3047,7 @@ class Table(Fixed): is_shape_reversed = False def __init__(self, *args, **kwargs): - super(Table, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.index_axes = [] self.non_index_axes = [] self.values_axes = [] @@ -4199,8 +4199,8 @@ def write(self, obj, data_columns=None, **kwargs): name = obj.name or 'values' obj = DataFrame({name: obj}, index=obj.index) obj.columns = [name] - return super(AppendableSeriesTable, self).write( - obj=obj, data_columns=obj.columns.tolist(), **kwargs) + return super().write(obj=obj, data_columns=obj.columns.tolist(), + **kwargs) def read(self, columns=None, **kwargs): @@ -4209,7 +4209,7 @@ def read(self, columns=None, **kwargs): for n in self.levels: if n not in columns: columns.insert(0, n) - s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs) + s = super().read(columns=columns, **kwargs) if is_multi_index: s.set_index(self.levels, inplace=True) @@ -4233,7 +4233,7 @@ def write(self, obj, **kwargs): cols = list(self.levels) cols.append(name) obj.columns = cols - return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs) + return super().write(obj=obj, **kwargs) class GenericTable(AppendableFrameTable): @@ -4306,12 +4306,11 @@ def write(self, obj, data_columns=None, **kwargs): for n in self.levels: if n not in data_columns: data_columns.insert(0, n) - return 
super(AppendableMultiFrameTable, self).write( - obj=obj, data_columns=data_columns, **kwargs) + return super().write(obj=obj, data_columns=data_columns, **kwargs) def read(self, **kwargs): - df = super(AppendableMultiFrameTable, self).read(**kwargs) + df = super().read(**kwargs) df = df.set_index(self.levels) # remove names for 'level_%d' diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 789b6ca7e97bd..18c3ee1415df7 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1279,7 +1279,7 @@ def __init__(self, *args, **kwargs): # this will transform time(12,34,56,789) into '12:34:56.000789' # (this is what sqlalchemy does) sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f")) - super(SQLiteTable, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def sql_schema(self): return str(";\n".join(self.table)) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 2545cb38b1de9..550a6ca3cdc9f 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -955,7 +955,7 @@ def __init__(self, path_or_buf, convert_dates=True, convert_missing=False, preserve_dtypes=True, columns=None, order_categoricals=True, encoding=None, chunksize=None): - super(StataReader, self).__init__() + super().__init__() self.col_sizes = () # Arguments to the reader (can be temporarily overridden in @@ -1997,7 +1997,7 @@ class StataWriter(StataParser): def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None): - super(StataWriter, self).__init__() + super().__init__() self._convert_dates = {} if convert_dates is None else convert_dates self._write_index = write_index self._encoding = 'latin-1' @@ -2750,11 +2750,10 @@ def __init__(self, fname, data, convert_dates=None, write_index=True, # Shallow copy since convert_strl might be modified later self._convert_strl = [] if convert_strl is None else convert_strl[:] - super(StataWriter117, self).__init__(fname, data, 
convert_dates, - write_index, byteorder=byteorder, - time_stamp=time_stamp, - data_label=data_label, - variable_labels=variable_labels) + super().__init__(fname, data, convert_dates, write_index, + byteorder=byteorder, time_stamp=time_stamp, + data_label=data_label, + variable_labels=variable_labels) self._map = None self._strl_blob = None diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 7b1e8a8f0aaeb..92f2f2a69c665 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -883,7 +883,7 @@ def __init__(self, data, x, y, s=None, c=None, **kwargs): # hide the matplotlib default for size, in case we want to change # the handling of this argument later s = 20 - super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs) + super().__init__(data, x, y, s=s, **kwargs) if is_integer(c) and not self.data.columns.holds_integer(): c = self.data.columns[c] self.c = c @@ -940,7 +940,7 @@ class HexBinPlot(PlanePlot): _kind = 'hexbin' def __init__(self, data, x, y, C=None, **kwargs): - super(HexBinPlot, self).__init__(data, x, y, **kwargs) + super().__init__(data, x, y, **kwargs) if is_integer(C) and not self.data.columns.holds_integer(): C = self.data.columns[C] self.C = C @@ -1723,7 +1723,7 @@ def orientation(self): @property def result(self): if self.return_type is None: - return super(BoxPlot, self).result + return super().result else: return self._return_obj diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 0947908803559..72b14e721c6df 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -110,29 +110,29 @@ class _Options(dict): def __init__(self, deprecated=False): self._deprecated = deprecated # self['xaxis.compat'] = False - super(_Options, self).__setitem__('xaxis.compat', False) + super().__setitem__('xaxis.compat', False) def __getitem__(self, key): key = self._get_canonical_key(key) if key not in self: raise ValueError( '{key} is not a valid pandas plotting option'.format(key=key)) - 
return super(_Options, self).__getitem__(key) + return super().__getitem__(key) def __setitem__(self, key, value): key = self._get_canonical_key(key) - return super(_Options, self).__setitem__(key, value) + return super().__setitem__(key, value) def __delitem__(self, key): key = self._get_canonical_key(key) if key in self._DEFAULT_KEYS: raise ValueError( 'Cannot remove default parameter {key}'.format(key=key)) - return super(_Options, self).__delitem__(key) + return super().__delitem__(key) def __contains__(self, key): key = self._get_canonical_key(key) - return super(_Options, self).__contains__(key) + return super().__contains__(key) def reset(self): """ diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index 4512e98ebe0cf..4769f3842b08a 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -120,7 +120,7 @@ def test_from_dtype_from_float(self, data): class TestArithmeticOps(BaseOpsUtil): def _check_divmod_op(self, s, op, other, exc=None): - super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) + super()._check_divmod_op(s, op, other, None) def _check_op(self, s, op_name, other, exc=None): op = self.get_op_from_name(op_name) diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/bool.py index 025c4cacd8fa1..435ea4e3ec2b5 100644 --- a/pandas/tests/extension/arrow/bool.py +++ b/pandas/tests/extension/arrow/bool.py @@ -82,7 +82,7 @@ def astype(self, dtype, copy=True): if copy: return self.copy() return self - return super(ArrowBoolArray, self).astype(dtype, copy) + return super().astype(dtype, copy) @property def dtype(self): diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 2aece66d94150..01163064b0918 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -47,7 +47,7 @@ def test_from_dtype(self, data): # seems like some bug in isna on empty 
BoolArray returning floats. @pytest.mark.xfail(reason='bad is-na for empty data') def test_from_sequence_from_cls(self, data): - super(TestConstructors, self).test_from_sequence_from_cls(data) + super().test_from_sequence_from_cls(data) class TestReduce(base.BaseNoReduceTests): diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 8c9d7fd756377..87f67b9843bae 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -262,8 +262,7 @@ def test_astype_dispatches(frame): class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests): def check_opname(self, s, op_name, other, exc=None): - super(TestArithmeticOps, self).check_opname(s, op_name, - other, exc=None) + super().check_opname(s, op_name, other, exc=None) def test_arith_series_with_array(self, data, all_arithmetic_operators): op_name = all_arithmetic_operators @@ -289,9 +288,7 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators): def _check_divmod_op(self, s, op, other, exc=NotImplementedError): # We implement divmod - super(TestArithmeticOps, self)._check_divmod_op( - s, op, other, exc=None - ) + super()._check_divmod_op(s, op, other, exc=None) def test_error(self): pass @@ -300,8 +297,7 @@ def test_error(self): class TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests): def check_opname(self, s, op_name, other, exc=None): - super(TestComparisonOps, self).check_opname(s, op_name, - other, exc=None) + super().check_opname(s, op_name, other, exc=None) def _compare_other(self, s, data, op_name, other): self.check_opname(s, op_name, other) diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 5eb6aba710234..97c329e0a5c92 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -188,24 +188,21 @@ def test_sort_values_frame(self): @unstable def test_argsort(self, 
data_for_sorting): - super(TestMethods, self).test_argsort(data_for_sorting) + super().test_argsort(data_for_sorting) @unstable def test_argsort_missing(self, data_missing_for_sorting): - super(TestMethods, self).test_argsort_missing( - data_missing_for_sorting) + super().test_argsort_missing(data_missing_for_sorting) @unstable @pytest.mark.parametrize('ascending', [True, False]) def test_sort_values(self, data_for_sorting, ascending): - super(TestMethods, self).test_sort_values( - data_for_sorting, ascending) + super().test_sort_values(data_for_sorting, ascending) @unstable @pytest.mark.parametrize('ascending', [True, False]) def test_sort_values_missing(self, data_missing_for_sorting, ascending): - super(TestMethods, self).test_sort_values_missing( - data_missing_for_sorting, ascending) + super().test_sort_values_missing(data_missing_for_sorting, ascending) @pytest.mark.skip(reason="combine for JSONArray not supported") def test_combine_le(self, data_repeated): @@ -232,7 +229,7 @@ def test_where_series(self, data, na_value): @pytest.mark.skip(reason="Can't compare dicts.") def test_searchsorted(self, data_for_sorting): - super(TestMethods, self).test_searchsorted(data_for_sorting) + super().test_searchsorted(data_for_sorting) class TestCasting(BaseJSON, base.BaseCastingTests): @@ -274,9 +271,7 @@ def test_groupby_extension_apply(self): @unstable @pytest.mark.parametrize('as_index', [True, False]) def test_groupby_extension_agg(self, as_index, data_for_grouping): - super(TestGroupby, self).test_groupby_extension_agg( - as_index, data_for_grouping - ) + super().test_groupby_extension_agg(as_index, data_for_grouping) class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests): @@ -294,9 +289,7 @@ def test_divmod_series_array(self): pass def _check_divmod_op(self, s, op, other, exc=NotImplementedError): - return super(TestArithmeticOps, self)._check_divmod_op( - s, op, other, exc=TypeError - ) + return super()._check_divmod_op(s, op, other, exc=TypeError) class 
TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests): diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index 9871d0d8f96f5..4cf9f78e1531d 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -86,7 +86,7 @@ class TestInterface(base.BaseInterfaceTests): @pytest.mark.skip(reason="Memory usage doesn't match") def test_memory_usage(self, data): # Is this deliberate? - super(TestInterface, self).test_memory_usage(data) + super().test_memory_usage(data) class TestConstructors(base.BaseConstructorsTests): @@ -105,12 +105,12 @@ def test_getitem_scalar(self, data): # CategoricalDtype.type isn't "correct" since it should # be a parent of the elements (object). But don't want # to break things by changing. - super(TestGetitem, self).test_getitem_scalar(data) + super().test_getitem_scalar(data) @skip_take def test_take(self, data, na_value, na_cmp): # TODO remove this once Categorical.take is fixed - super(TestGetitem, self).test_take(data, na_value, na_cmp) + super().test_take(data, na_value, na_cmp) @skip_take def test_take_negative(self, data): @@ -204,8 +204,7 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators): op_name = all_arithmetic_operators if op_name != '__rmod__': - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, op_name) + super().test_arith_series_with_scalar(data, op_name) else: pytest.skip('rmod never called when string is first argument') @@ -220,9 +219,7 @@ def test_divmod_series_array(self): pass def _check_divmod_op(self, s, op, other, exc=NotImplementedError): - return super(TestArithmeticOps, self)._check_divmod_op( - s, op, other, exc=TypeError - ) + return super()._check_divmod_op(s, op, other, exc=TypeError) class TestComparisonOps(base.BaseComparisonOpsTests): diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index b228f44129623..baee04c3b79eb 100644 --- 
a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -109,7 +109,7 @@ def test_array_interface(self, data): # np.asarray(DTA) is currently always tz-naive. pytest.skip("GH-23569") else: - super(TestInterface, self).test_array_interface(data) + super().test_array_interface(data) class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests): @@ -122,9 +122,8 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators): exc=None) else: # ... but not the rest. - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) + super().test_arith_series_with_scalar(data, + all_arithmetic_operators) def test_add_series_with_extension_array(self, data): # Datetime + Datetime not implemented @@ -140,9 +139,8 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators): exc=None) else: # ... but not the rest. - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) + super().test_arith_series_with_scalar(data, + all_arithmetic_operators) def test_error(self, data, all_arithmetic_operators): pass @@ -197,7 +195,7 @@ def test_concat_mixed_dtypes(self, data): # concat(Series[datetimetz], Series[category]) uses a # plain np.array(values) on the DatetimeArray, which # drops the tz. 
- super(TestReshaping, self).test_concat_mixed_dtypes(data) + super().test_concat_mixed_dtypes(data) @pytest.mark.parametrize("obj", ["series", "frame"]) def test_unstack(self, obj): diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index e9f96390821a6..22bb086a919ca 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -94,8 +94,7 @@ class TestArithmeticOps(base.BaseArithmeticOpsTests): def check_opname(self, s, op_name, other, exc=None): # overwriting to indicate ops don't raise an error - super(TestArithmeticOps, self).check_opname(s, op_name, - other, exc=None) + super().check_opname(s, op_name, other, exc=None) def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: @@ -138,7 +137,7 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError): op(s, other) def _check_divmod_op(self, s, op, other, exc=None): - super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) + super()._check_divmod_op(s, op, other, None) @pytest.mark.skip(reason="intNA does not error on ops") def test_error(self, data, all_arithmetic_operators): @@ -149,8 +148,7 @@ def test_error(self, data, all_arithmetic_operators): class TestComparisonOps(base.BaseComparisonOpsTests): def check_opname(self, s, op_name, other, exc=None): - super(TestComparisonOps, self).check_opname(s, op_name, - other, exc=None) + super().check_opname(s, op_name, other, exc=None) def _compare_other(self, s, data, op_name, other): self.check_opname(s, op_name, other) diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index 221bf0b7d0648..f1f90b298ffe2 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -159,4 +159,4 @@ class TestParsing(BaseInterval, base.BaseParsingTests): def test_EA_types(self, engine, data): expected_msg = r'.*must implement _from_sequence_of_strings.*' with 
pytest.raises(NotImplementedError, match=expected_msg): - super(TestParsing, self).test_EA_types(engine, data) + super().test_EA_types(engine, data) diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index f7a312c00d193..f31fa5b87cfe5 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -145,7 +145,7 @@ class TestCasting(BaseNumPyTests, base.BaseCastingTests): @skip_nested def test_astype_str(self, data): # ValueError: setting an array element with a sequence - super(TestCasting, self).test_astype_str(data) + super().test_astype_str(data) class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests): @@ -157,7 +157,7 @@ def test_from_dtype(self, data): @skip_nested def test_array_from_scalars(self, data): # ValueError: PandasArray must be 1-dimensional. - super(TestConstructors, self).test_array_from_scalars(data) + super().test_array_from_scalars(data) class TestDtype(BaseNumPyTests, base.BaseDtypeTests): @@ -173,12 +173,12 @@ class TestGetitem(BaseNumPyTests, base.BaseGetitemTests): @skip_nested def test_getitem_scalar(self, data): # AssertionError - super(TestGetitem, self).test_getitem_scalar(data) + super().test_getitem_scalar(data) @skip_nested def test_take_series(self, data): # ValueError: PandasArray must be 1-dimensional. 
- super(TestGetitem, self).test_take_series(data) + super().test_take_series(data) class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests): @@ -186,15 +186,15 @@ class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests): def test_groupby_extension_apply( self, data_for_grouping, groupby_apply_op): # ValueError: Names should be list-like for a MultiIndex - super(TestGroupby, self).test_groupby_extension_apply( - data_for_grouping, groupby_apply_op) + super().test_groupby_extension_apply(data_for_grouping, + groupby_apply_op) class TestInterface(BaseNumPyTests, base.BaseInterfaceTests): @skip_nested def test_array_interface(self, data): # NumPy array shape inference - super(TestInterface, self).test_array_interface(data) + super().test_array_interface(data) class TestMethods(BaseNumPyTests, base.BaseMethodsTests): @@ -207,56 +207,55 @@ def test_value_counts(self, all_data, dropna): # We have a bool dtype, so the result is an ExtensionArray # but expected is not def test_combine_le(self, data_repeated): - super(TestMethods, self).test_combine_le(data_repeated) + super().test_combine_le(data_repeated) @skip_nested def test_combine_add(self, data_repeated): # Not numeric - super(TestMethods, self).test_combine_add(data_repeated) + super().test_combine_add(data_repeated) @skip_nested def test_shift_fill_value(self, data): # np.array shape inference. Shift implementation fails. - super(TestMethods, self).test_shift_fill_value(data) + super().test_shift_fill_value(data) @skip_nested @pytest.mark.parametrize('box', [pd.Series, lambda x: x]) @pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique]) def test_unique(self, data, box, method): # Fails creating expected - super(TestMethods, self).test_unique(data, box, method) + super().test_unique(data, box, method) @skip_nested def test_fillna_copy_frame(self, data_missing): # The "scalar" for this array isn't a scalar. 
- super(TestMethods, self).test_fillna_copy_frame(data_missing) + super().test_fillna_copy_frame(data_missing) @skip_nested def test_fillna_copy_series(self, data_missing): # The "scalar" for this array isn't a scalar. - super(TestMethods, self).test_fillna_copy_series(data_missing) + super().test_fillna_copy_series(data_missing) @skip_nested def test_hash_pandas_object_works(self, data, as_frame): # ndarray of tuples not hashable - super(TestMethods, self).test_hash_pandas_object_works(data, as_frame) + super().test_hash_pandas_object_works(data, as_frame) @skip_nested def test_searchsorted(self, data_for_sorting, as_series): # Test setup fails. - super(TestMethods, self).test_searchsorted(data_for_sorting, as_series) + super().test_searchsorted(data_for_sorting, as_series) @skip_nested def test_where_series(self, data, na_value, as_frame): # Test setup fails. - super(TestMethods, self).test_where_series(data, na_value, as_frame) + super().test_where_series(data, na_value, as_frame) @skip_nested @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) def test_repeat(self, data, repeats, as_series, use_numpy): # Fails creating expected - super(TestMethods, self).test_repeat( - data, repeats, as_series, use_numpy) + super().test_repeat(data, repeats, as_series, use_numpy) @skip_nested @@ -275,14 +274,10 @@ def test_error(self, data, all_arithmetic_operators): pass def test_arith_series_with_scalar(self, data, all_arithmetic_operators): - super(TestArithmetics, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) + super().test_arith_series_with_scalar(data, all_arithmetic_operators) def test_arith_series_with_array(self, data, all_arithmetic_operators): - super(TestArithmetics, self).test_arith_series_with_array( - data, all_arithmetic_operators - ) + super().test_arith_series_with_array(data, all_arithmetic_operators) class TestPrinting(BaseNumPyTests, base.BasePrintingTests): @@ -309,23 +304,23 @@ class TestMissing(BaseNumPyTests, 
base.BaseMissingTests): @skip_nested def test_fillna_scalar(self, data_missing): # Non-scalar "scalar" values. - super(TestMissing, self).test_fillna_scalar(data_missing) + super().test_fillna_scalar(data_missing) @skip_nested def test_fillna_series_method(self, data_missing, fillna_method): # Non-scalar "scalar" values. - super(TestMissing, self).test_fillna_series_method( + super().test_fillna_series_method( data_missing, fillna_method) @skip_nested def test_fillna_series(self, data_missing): # Non-scalar "scalar" values. - super(TestMissing, self).test_fillna_series(data_missing) + super().test_fillna_series(data_missing) @skip_nested def test_fillna_frame(self, data_missing): # Non-scalar "scalar" values. - super(TestMissing, self).test_fillna_frame(data_missing) + super().test_fillna_frame(data_missing) class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): @@ -333,23 +328,22 @@ class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): @pytest.mark.skip("Incorrect parent test") # not actually a mixed concat, since we concat int and int. 
def test_concat_mixed_dtypes(self, data): - super(TestReshaping, self).test_concat_mixed_dtypes(data) + super().test_concat_mixed_dtypes(data) @skip_nested def test_merge(self, data, na_value): # Fails creating expected - super(TestReshaping, self).test_merge(data, na_value) + super().test_merge(data, na_value) @skip_nested def test_merge_on_extension_array(self, data): # Fails creating expected - super(TestReshaping, self).test_merge_on_extension_array(data) + super().test_merge_on_extension_array(data) @skip_nested def test_merge_on_extension_array_duplicates(self, data): # Fails creating expected - super(TestReshaping, self).test_merge_on_extension_array_duplicates( - data) + super().test_merge_on_extension_array_duplicates(data) class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): @@ -357,61 +351,56 @@ class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): @skip_nested def test_setitem_scalar_series(self, data, box_in_series): # AssertionError - super(TestSetitem, self).test_setitem_scalar_series( - data, box_in_series) + super().test_setitem_scalar_series(data, box_in_series) @skip_nested def test_setitem_sequence(self, data, box_in_series): # ValueError: shape mismatch: value array of shape (2,1) could not # be broadcast to indexing result of shape (2,) - super(TestSetitem, self).test_setitem_sequence(data, box_in_series) + super().test_setitem_sequence(data, box_in_series) @skip_nested def test_setitem_sequence_mismatched_length_raises(self, data, as_array): # ValueError: PandasArray must be 1-dimensional. - (super(TestSetitem, self). 
- test_setitem_sequence_mismatched_length_raises(data, as_array)) + super().test_setitem_sequence_mismatched_length_raises(data, as_array) @skip_nested def test_setitem_sequence_broadcasts(self, data, box_in_series): # ValueError: cannot set using a list-like indexer with a different # length than the value - super(TestSetitem, self).test_setitem_sequence_broadcasts( - data, box_in_series) + super().test_setitem_sequence_broadcasts(data, box_in_series) @skip_nested def test_setitem_loc_scalar_mixed(self, data): # AssertionError - super(TestSetitem, self).test_setitem_loc_scalar_mixed(data) + super().test_setitem_loc_scalar_mixed(data) @skip_nested def test_setitem_loc_scalar_multiple_homogoneous(self, data): # AssertionError - super(TestSetitem, self).test_setitem_loc_scalar_multiple_homogoneous( - data) + super().test_setitem_loc_scalar_multiple_homogoneous(data) @skip_nested def test_setitem_iloc_scalar_mixed(self, data): # AssertionError - super(TestSetitem, self).test_setitem_iloc_scalar_mixed(data) + super().test_setitem_iloc_scalar_mixed(data) @skip_nested def test_setitem_iloc_scalar_multiple_homogoneous(self, data): # AssertionError - super(TestSetitem, self).test_setitem_iloc_scalar_multiple_homogoneous( - data) + super().test_setitem_iloc_scalar_multiple_homogoneous(data) @skip_nested @pytest.mark.parametrize('setter', ['loc', None]) def test_setitem_mask_broadcast(self, data, setter): # ValueError: cannot set using a list-like indexer with a different # length than the value - super(TestSetitem, self).test_setitem_mask_broadcast(data, setter) + super().test_setitem_mask_broadcast(data, setter) @skip_nested def test_setitem_scalar_key_sequence_raise(self, data): # Failed: DID NOT RAISE - super(TestSetitem, self).test_setitem_scalar_key_sequence_raise(data) + super().test_setitem_scalar_key_sequence_raise(data) @skip_nested diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py index 4e86e8ee8b24e..b988dcb211dd0 100644 
--- a/pandas/tests/extension/test_period.py +++ b/pandas/tests/extension/test_period.py @@ -93,9 +93,8 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators): exc=None) else: # ... but not the rest. - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) + super().test_arith_series_with_scalar( + data, all_arithmetic_operators) def test_arith_series_with_array(self, data, all_arithmetic_operators): if all_arithmetic_operators in self.implements: @@ -104,14 +103,11 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators): exc=None) else: # ... but not the rest. - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, all_arithmetic_operators - ) + super().test_arith_series_with_array( + data, all_arithmetic_operators) def _check_divmod_op(self, s, op, other, exc=NotImplementedError): - super(TestArithmeticOps, self)._check_divmod_op( - s, op, other, exc=TypeError - ) + super()._check_divmod_op(s, op, other, exc=TypeError) def test_add_series_with_extension_array(self, data): # we don't implement + for Period @@ -168,4 +164,4 @@ class TestParsing(BasePeriodTests, base.BaseParsingTests): def test_EA_types(self, engine, data): expected_msg = r'.*must implement _from_sequence_of_strings.*' with pytest.raises(NotImplementedError, match=expected_msg): - super(TestParsing, self).test_EA_types(engine, data) + super().test_EA_types(engine, data) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 1fdca8799c44c..faf1905ea1763 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -121,23 +121,23 @@ def test_concat_mixed_dtypes(self, data): def test_concat_columns(self, data, na_value): self._check_unsupported(data) - super(TestReshaping, self).test_concat_columns(data, na_value) + super().test_concat_columns(data, na_value) def test_align(self, data, na_value): self._check_unsupported(data) -
super(TestReshaping, self).test_align(data, na_value) + super().test_align(data, na_value) def test_align_frame(self, data, na_value): self._check_unsupported(data) - super(TestReshaping, self).test_align_frame(data, na_value) + super().test_align_frame(data, na_value) def test_align_series_frame(self, data, na_value): self._check_unsupported(data) - super(TestReshaping, self).test_align_series_frame(data, na_value) + super().test_align_series_frame(data, na_value) def test_merge(self, data, na_value): self._check_unsupported(data) - super(TestReshaping, self).test_merge(data, na_value) + super().test_merge(data, na_value) class TestGetitem(BaseSparseTests, base.BaseGetitemTests): @@ -152,7 +152,7 @@ def test_get(self, data): def test_reindex(self, data, na_value): self._check_unsupported(data) - super(TestGetitem, self).test_reindex(data, na_value) + super().test_reindex(data, na_value) # Skipping TestSetitem, since we don't implement it. @@ -178,15 +178,15 @@ def test_isna(self, data_missing): def test_fillna_limit_pad(self, data_missing): with tm.assert_produces_warning(PerformanceWarning): - super(TestMissing, self).test_fillna_limit_pad(data_missing) + super().test_fillna_limit_pad(data_missing) def test_fillna_limit_backfill(self, data_missing): with tm.assert_produces_warning(PerformanceWarning): - super(TestMissing, self).test_fillna_limit_backfill(data_missing) + super().test_fillna_limit_backfill(data_missing) def test_fillna_series_method(self, data_missing): with tm.assert_produces_warning(PerformanceWarning): - super(TestMissing, self).test_fillna_limit_backfill(data_missing) + super().test_fillna_limit_backfill(data_missing) @pytest.mark.skip(reason="Unsupported") def test_fillna_series(self): @@ -290,12 +290,11 @@ def test_combine_first(self, data): # Right now this is upcasted to float, just like combine_first # for Series[int] pytest.skip("TODO(SparseArray.__setitem__ will preserve dtype.") - super(TestMethods, self).test_combine_first(data) + 
super().test_combine_first(data) def test_searchsorted(self, data_for_sorting, as_series): with tm.assert_produces_warning(PerformanceWarning): - super(TestMethods, self).test_searchsorted(data_for_sorting, - as_series) + super().test_searchsorted(data_for_sorting, as_series) class TestCasting(BaseSparseTests, base.BaseCastingTests): @@ -320,17 +319,11 @@ def test_error(self, data, all_arithmetic_operators): def test_arith_series_with_scalar(self, data, all_arithmetic_operators): self._skip_if_different_combine(data) - super(TestArithmeticOps, self).test_arith_series_with_scalar( - data, - all_arithmetic_operators - ) + super().test_arith_series_with_scalar(data, all_arithmetic_operators) def test_arith_series_with_array(self, data, all_arithmetic_operators): self._skip_if_different_combine(data) - super(TestArithmeticOps, self).test_arith_series_with_array( - data, - all_arithmetic_operators - ) + super().test_arith_series_with_array(data, all_arithmetic_operators) class TestComparisonOps(BaseSparseTests, base.BaseComparisonOpsTests): @@ -363,7 +356,7 @@ def _compare_other(self, s, data, op_name, other): class TestPrinting(BaseSparseTests, base.BasePrintingTests): @pytest.mark.xfail(reason='Different repr', strict=True) def test_array_repr(self, data, size): - super(TestPrinting, self).test_array_repr(data, size) + super().test_array_repr(data, size) class TestParsing(BaseSparseTests, base.BaseParsingTests): @@ -371,4 +364,4 @@ class TestParsing(BaseSparseTests, base.BaseParsingTests): def test_EA_types(self, engine, data): expected_msg = r'.*must implement _from_sequence_of_strings.*' with pytest.raises(NotImplementedError, match=expected_msg): - super(TestParsing, self).test_EA_types(engine, data) + super().test_EA_types(engine, data) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 73c5c43cb709d..3f6cec0bddafb 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -31,7 +31,7 @@ 
class CustomDataFrame(DataFrame): """ def __init__(self, *args, **kw): - super(CustomDataFrame, self).__init__(*args, **kw) + super().__init__(*args, **kw) @property def _constructor(self): diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 0d04cf6887888..3d9bfcd126377 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1044,7 +1044,7 @@ class RaisingObjectException(Exception): class RaisingObject: def __init__(self, msg='I will raise inside Cython'): - super(RaisingObject, self).__init__() + super().__init__() self.msg = msg def __eq__(self, other): diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 799bec267dfb4..4a8f691987f8e 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -21,7 +21,7 @@ class TestDatetimeIndexOps(Ops): def setup_method(self, method): - super(TestDatetimeIndexOps, self).setup_method(method) + super().setup_method(method) mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)) self.is_valid_objs = [o for o in self.objs if mask(o)] diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 372968a3501a2..19e42b4621b3a 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -358,11 +358,11 @@ def test_repr(self): @pytest.mark.skip(reason='not a valid repr as we use interval notation') def test_repr_max_seq_item_setting(self): - super(TestIntervalIndex, self).test_repr_max_seq_item_setting() + super().test_repr_max_seq_item_setting() @pytest.mark.skip(reason='not a valid repr as we use interval notation') def test_repr_roundtrip(self): - super(TestIntervalIndex, self).test_repr_roundtrip() + super().test_repr_roundtrip() def test_frame_repr(self): # 
https://github.com/pandas-dev/pandas/pull/24134/files diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 8b022268897b6..766919735c191 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -12,7 +12,7 @@ class TestPeriodIndexOps(Ops): def setup_method(self, method): - super(TestPeriodIndexOps, self).setup_method(method) + super().setup_method(method) mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)) self.is_valid_objs = [o for o in self.objs if mask(o)] diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 89bcf56dbda71..2f3f15101e7ca 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -439,7 +439,7 @@ def test_shift(self): @td.skip_if_32bit def test_ndarray_compat_properties(self): - super(TestPeriodIndex, self).test_ndarray_compat_properties() + super().test_ndarray_compat_properties() def test_negative_ordinals(self): Period(ordinal=-1000, freq='A') diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 63210f67c2dbd..9e96b7d99e35d 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -14,7 +14,7 @@ class TestTimedeltaIndexOps(Ops): def setup_method(self, method): - super(TestTimedeltaIndexOps, self).setup_method(method) + super().setup_method(method) mask = lambda x: isinstance(x, TimedeltaIndex) self.is_valid_objs = [o for o in self.objs if mask(o)] self.not_valid_objs = [] diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py index 809f448864c08..2f012c4d01912 100644 --- a/pandas/tests/io/formats/test_console.py +++ b/pandas/tests/io/formats/test_console.py @@ -10,7 +10,7 @@ class MockEncoding: # TODO(py27): replace with mock side effect should be an exception that will
be raised. """ def __init__(self, encoding): - super(MockEncoding, self).__init__() + super().__init__() self.val = encoding @property diff --git a/pandas/tests/io/msgpack/test_unpack.py b/pandas/tests/io/msgpack/test_unpack.py index 581f831f8f187..f33e0865a1145 100644 --- a/pandas/tests/io/msgpack/test_unpack.py +++ b/pandas/tests/io/msgpack/test_unpack.py @@ -49,8 +49,7 @@ def test_unpacker_ext_hook(self): class MyUnpacker(Unpacker): def __init__(self): - super(MyUnpacker, self).__init__(ext_hook=self._hook, - encoding='utf-8') + super().__init__(ext_hook=self._hook, encoding='utf-8') def _hook(self, code, data): if code == 1: diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 3820192f5524f..225503cddceee 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1138,7 +1138,7 @@ def test_importcheck_thread_safety(self, datapath): class ErrorThread(threading.Thread): def run(self): try: - super(ErrorThread, self).run() + super().run() except Exception as e: self.err = e else: diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 369432dba7b4f..a0a5780bac207 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -329,7 +329,7 @@ def test_intervals(self): class TestIndex(TestPackers): def setup_method(self, method): - super(TestIndex, self).setup_method(method) + super().setup_method(method) self.d = { 'string': tm.makeStringIndex(100), @@ -394,7 +394,7 @@ def categorical_index(self): class TestSeries(TestPackers): def setup_method(self, method): - super(TestSeries, self).setup_method(method) + super().setup_method(method) self.d = {} @@ -444,7 +444,7 @@ def test_basic(self): class TestCategorical(TestPackers): def setup_method(self, method): - super(TestCategorical, self).setup_method(method) + super().setup_method(method) self.d = {} @@ -468,7 +468,7 @@ def test_basic(self): class TestNDFrame(TestPackers): def setup_method(self, method): - 
super(TestNDFrame, self).setup_method(method) + super().setup_method(method) data = { 'A': [0., 1., 2., 3., np.nan], @@ -610,7 +610,7 @@ def setup_method(self, method): else: self._SQLALCHEMY_INSTALLED = True - super(TestCompression, self).setup_method(method) + super().setup_method(method) data = { 'A': np.arange(1000, dtype=np.float64), 'B': np.arange(1000, dtype=np.int32), @@ -799,7 +799,7 @@ def test_readonly_axis_zlib_to_sql(self): class TestEncoding(TestPackers): def setup_method(self, method): - super(TestEncoding, self).setup_method(method) + super().setup_method(method) data = { 'A': ['\u2019'] * 1000, 'B': np.arange(1000, dtype=np.int32), diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 646b424b7b6cd..4c37830af172d 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1041,7 +1041,7 @@ class _EngineToConnMixin: @pytest.fixture(autouse=True) def setup_method(self, load_iris_data): - super(_EngineToConnMixin, self).load_test_data_and_sql() + super().load_test_data_and_sql() engine = self.conn conn = engine.connect() self.__tx = conn.begin() @@ -1056,7 +1056,7 @@ def setup_method(self, load_iris_data): self.conn = self.__engine self.pandasSQL = sql.SQLDatabase(self.__engine) # XXX: - # super(_EngineToConnMixin, self).teardown_method(method) + # super().teardown_method(method) @pytest.mark.single diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 009fc015dd61c..d59891f573d4f 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -257,7 +257,7 @@ def test_binary_ops_docs(self, klass): class TestIndexOps(Ops): def setup_method(self, method): - super(TestIndexOps, self).setup_method(method) + super().setup_method(method) self.is_valid_objs = self.objs self.not_valid_objs = [] diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 937b3218eb5c6..ed5fa18f4e26f 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -2162,7 
+2162,7 @@ class TestMomentsConsistency(Base): ] def _create_data(self): - super(TestMomentsConsistency, self)._create_data() + super()._create_data() self.data = _consistency_data def setup_method(self, method): diff --git a/pandas/tests/tseries/holiday/test_calendar.py b/pandas/tests/tseries/holiday/test_calendar.py index a5cc4095ce583..407e83de5e6e9 100644 --- a/pandas/tests/tseries/holiday/test_calendar.py +++ b/pandas/tests/tseries/holiday/test_calendar.py @@ -43,7 +43,7 @@ def test_calendar_caching(): class TestCalendar(AbstractHolidayCalendar): def __init__(self, name=None, rules=None): - super(TestCalendar, self).__init__(name=name, rules=rules) + super().__init__(name=name, rules=rules) jan1 = TestCalendar(rules=[Holiday("jan1", year=2015, month=1, day=1)]) jan2 = TestCalendar(rules=[Holiday("jan2", year=2015, month=1, day=2)]) diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index af7e5c254996c..ae080803ba764 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -344,7 +344,7 @@ def __init__(self, name=None, rules=None): rules : array of Holiday objects A set of rules used to create the holidays. 
""" - super(AbstractHolidayCalendar, self).__init__() + super().__init__() if name is None: name = self.__class__.__name__ self.name = name diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index d11946f99dd36..96de63f63aac5 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -784,7 +784,7 @@ def _onOffset(self, dt, businesshours): return False def _repr_attrs(self): - out = super(BusinessHourMixin, self)._repr_attrs() + out = super()._repr_attrs() start = self.start.strftime('%H:%M') end = self.end.strftime('%H:%M') attrs = ['{prefix}={start}-{end}'.format(prefix=self._prefix, @@ -806,7 +806,7 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset): def __init__(self, n=1, normalize=False, start='09:00', end='17:00', offset=timedelta(0)): BaseOffset.__init__(self, n, normalize) - super(BusinessHour, self).__init__(start=start, end=end, offset=offset) + super().__init__(start=start, end=end, offset=offset) class CustomBusinessDay(_CustomMixin, BusinessDay):