From 1662318d3637829158eeee9b792e3a69e8e9c996 Mon Sep 17 00:00:00 2001 From: tp Date: Sat, 13 Apr 2019 21:58:27 +0100 Subject: [PATCH] CLN: remove compat.iteritems --- pandas/compat/__init__.py | 6 +-- pandas/core/arrays/categorical.py | 3 +- pandas/core/base.py | 4 +- pandas/core/common.py | 4 +- pandas/core/computation/align.py | 5 +-- pandas/core/computation/expr.py | 4 +- pandas/core/frame.py | 17 ++++----- pandas/core/generic.py | 11 +++--- pandas/core/groupby/groupby.py | 3 +- pandas/core/indexes/base.py | 7 ++-- pandas/core/indexes/category.py | 2 +- pandas/core/indexes/range.py | 2 +- pandas/core/nanops.py | 2 +- pandas/core/panel.py | 12 +++--- pandas/core/reshape/melt.py | 3 +- pandas/core/series.py | 5 +-- pandas/core/sparse/frame.py | 29 +++++++------- pandas/io/html.py | 6 +-- pandas/io/json/json.py | 14 +++---- pandas/io/json/normalize.py | 2 +- pandas/io/packers.py | 2 +- pandas/io/parsers.py | 16 ++++---- pandas/io/pytables.py | 8 ++-- pandas/plotting/_core.py | 3 +- pandas/tests/frame/common.py | 6 +-- pandas/tests/frame/conftest.py | 10 ++--- pandas/tests/frame/test_api.py | 14 +++---- pandas/tests/frame/test_apply.py | 7 ++-- .../tests/frame/test_axis_select_reindex.py | 11 +++--- pandas/tests/frame/test_constructors.py | 22 +++++------ pandas/tests/frame/test_convert_to.py | 23 ++++++----- pandas/tests/frame/test_dtypes.py | 7 ++-- pandas/tests/frame/test_indexing.py | 12 +++--- pandas/tests/frame/test_operators.py | 10 ++--- pandas/tests/frame/test_replace.py | 11 ++---- pandas/tests/groupby/test_function.py | 4 +- pandas/tests/groupby/test_groupby.py | 8 ++-- pandas/tests/groupby/test_grouping.py | 6 +-- pandas/tests/indexes/common.py | 25 ++++++------ .../tests/indexes/datetimes/test_indexing.py | 6 +-- pandas/tests/indexes/datetimes/test_tools.py | 8 ++-- .../tests/indexes/timedeltas/test_indexing.py | 6 +-- pandas/tests/io/json/test_pandas.py | 5 +-- pandas/tests/io/json/test_ujson.py | 2 +- pandas/tests/io/parser/test_textreader.py | 3 +- pandas/tests/io/test_excel.py | 4 +- pandas/tests/io/test_stata.py | 3 +- pandas/tests/plotting/common.py | 3 +- pandas/tests/reshape/merge/test_join.py | 3 +- pandas/tests/reshape/test_concat.py | 10 ++--- pandas/tests/scalar/period/test_period.py | 3 +- .../tests/series/indexing/test_alter_index.py | 7 ++-- pandas/tests/series/test_api.py | 7 ++-- pandas/tests/series/test_apply.py | 5 +-- pandas/tests/series/test_combine_concat.py | 4 +- pandas/tests/series/test_rank.py | 3 +- pandas/tests/sparse/frame/test_frame.py | 6 +-- pandas/tests/sparse/series/test_series.py | 6 +-- pandas/tests/test_compat.py | 3 +- pandas/tests/test_sorting.py | 5 +-- .../tseries/frequencies/test_freq_code.py | 3 +- .../tseries/frequencies/test_inference.py | 5 +-- pandas/tests/tseries/offsets/test_offsets.py | 36 +++++++++--------- .../tests/tseries/offsets/test_yqm_offsets.py | 30 +++++++-------- pandas/util/_doctools.py | 4 +- pandas/util/testing.py | 3 +- 66 files changed, 241 insertions(+), 288 deletions(-) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 54a7afd90a09a..549359259bbd4 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -6,7 +6,7 @@ Key items to import for compatible code: * lists: lrange(), lmap(), lzip(), lfilter() -* iterable method compatibility: iteritems, iterkeys, itervalues +* iterable method compatibility: iterkeys, itervalues * Uses the original
method if available, otherwise uses items, keys, values. * add_metaclass(metaclass) - class decorator that recreates class with with the given metaclass instead (and avoids intermediary class creation) @@ -45,10 +45,6 @@ def lfilter(*args, **kwargs): return list(filter(*args, **kwargs)) -def iteritems(obj, **kw): - return iter(obj.items(**kw)) - - def iterkeys(obj, **kw): return iter(obj.keys(**kw)) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index cd49946652566..995ed59ddabb1 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -9,7 +9,6 @@ from pandas._config import get_option from pandas._libs import algos as libalgos, lib -import pandas.compat as compat from pandas.compat import lzip from pandas.compat.numpy import function as nv from pandas.util._decorators import ( @@ -1317,7 +1316,7 @@ def __setstate__(self, state): state['_dtype'] = CategoricalDtype(state['_categories'], state['_ordered']) - for k, v in compat.iteritems(state): + for k, v in state.items(): setattr(self, k, v) @property diff --git a/pandas/core/base.py b/pandas/core/base.py index d4294e59cc845..40f090f661c2f 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -364,7 +364,7 @@ def nested_renaming_depr(level=4): # be list-likes if any(is_aggregator(x) for x in compat.itervalues(arg)): new_arg = OrderedDict() - for k, v in compat.iteritems(arg): + for k, v in arg.items(): if not isinstance(v, (tuple, list, dict)): new_arg[k] = [v] else: @@ -432,7 +432,7 @@ def _agg(arg, func): return an OrderedDict """ result = OrderedDict() - for fname, agg_how in compat.iteritems(arg): + for fname, agg_how in arg.items(): result[fname] = func(fname, agg_how) return result diff --git a/pandas/core/common.py b/pandas/core/common.py index 3cb23e9ee921d..e62a2119df820 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -14,7 +14,7 @@ import numpy as np from pandas._libs import lib, tslibs -from pandas.compat import PY36, iteritems +from pandas.compat import PY36 from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -362,7 +362,7 @@ def dict_compat(d): dict """ - return {maybe_box_datetimelike(key): value for key, value in iteritems(d)} + return {maybe_box_datetimelike(key): value for key, value in d.items()} def standardize_mapping(into): diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 71b57ec4ecd1e..a7524161dd80e 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -9,7 +9,6 @@ from pandas.errors import PerformanceWarning import pandas as pd -from pandas import compat import pandas.core.common as com from pandas.core.computation.common import _result_type_many @@ -30,7 +29,7 @@ def _align_core_single_unary_op(term): def _zip_axes_from_type(typ, new_axes): axes = {ax_name: new_axes[ax_ind] - for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES)} + for ax_ind, ax_name in typ._AXIS_NAMES.items()} return axes @@ -84,7 +83,7 @@ def _align_core(terms): if not axes[ax].is_(itm): axes[ax] = axes[ax].join(itm, how='outer') - for i, ndim in compat.iteritems(ndims): + for i, ndim in ndims.items(): for axis, items in zip(range(ndim), axes): ti = terms[i].value diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index e61dbd07dac5d..245cd9c403080 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -10,7 +10,7 @@ import numpy as np -from pandas.compat 
import iteritems, lmap +from pandas.compat import lmap import pandas as pd from pandas.core import common as com @@ -300,7 +300,7 @@ def f(self, node, *args, **kwargs): def add_ops(op_classes): """Decorator to add default implementation of ops.""" def f(cls): - for op_attr_name, op_class in iteritems(op_classes): + for op_attr_name, op_class in op_classes.items(): ops = getattr(cls, '{name}_ops'.format(name=op_attr_name)) ops_map = getattr(cls, '{name}_op_nodes_map'.format( name=op_attr_name)) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fdc99e957e257..501bc7811a385 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -33,7 +33,6 @@ from pandas.util._validators import (validate_bool_kwarg, validate_axis_style_args) -from pandas import compat from pandas.compat import PY36, lmap, lzip, raise_with_traceback from pandas.compat.numpy import function as nv from pandas.core.dtypes.cast import ( @@ -1275,9 +1274,9 @@ def to_dict(self, orient='dict', into=dict): into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( - (k, v.to_dict(into)) for k, v in compat.iteritems(self)) + (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): - return into_c((k, v.tolist()) for k, v in compat.iteritems(self)) + return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), @@ -1287,14 +1286,14 @@ def to_dict(self, orient='dict', into=dict): ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) - for k, v in compat.iteritems(self)) + for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) - for k, v in compat.iteritems(row)) + for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: @@ -1480,7 +1479,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, else: arrays = [] arr_columns = [] - for k, v in compat.iteritems(data): + for k, v in data.items(): if k in columns: arr_columns.append(k) arrays.append(v) @@ -2430,7 +2429,7 @@ def _sizeof_fmt(num, size_qualifier): counts = self.get_dtype_counts() dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k - in sorted(compat.iteritems(counts))] + in sorted(counts.items())] lines.append('dtypes: {types}'.format(types=', '.join(dtypes))) if memory_usage is None: @@ -8051,8 +8050,8 @@ def isin(self, values): def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() - for index, s in compat.iteritems(data): - for col, v in compat.iteritems(s): + for index, s in data.items(): + for col, v in s.items(): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 885c499c58dfa..e17e3fd5d3e92 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -15,7 +15,6 @@ from pandas._config import config from pandas._libs import Timestamp, iNaT, properties -import pandas.compat as compat from pandas.compat import lrange, lzip, set_function_name, to_str from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -6154,7 +6153,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, 'by column') result = self 
if inplace else self.copy() - for k, v in compat.iteritems(value): + for k, v in value.items(): if k not in result: continue obj = result[k] @@ -6512,7 +6511,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, to_replace = regex regex = True - items = list(compat.iteritems(to_replace)) + items = list(to_replace.items()) keys, values = lzip(*items) or ([], []) are_mappings = [is_dict_like(v) for v in values] @@ -6551,7 +6550,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} res = self if inplace else self.copy() - for c, src in compat.iteritems(to_replace): + for c, src in to_replace.items(): if c in value and c in self: # object conversion is handled in # series.replace which is called recursivelly @@ -6563,7 +6562,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, # {'A': NA} -> 0 elif not is_list_like(value): - keys = [(k, src) for k, src in compat.iteritems(to_replace) + keys = [(k, src) for k, src in to_replace.items() if k in self] keys_len = len(keys) - 1 for i, (k, src) in enumerate(keys): @@ -6610,7 +6609,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} new_data = self._data - for k, v in compat.iteritems(value): + for k, v in value.items(): if k in self: new_data = new_data.replace(to_replace=to_replace, value=v, filter=[k], diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 92cb4db2ac868..b1936a8f5121f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -20,7 +20,6 @@ class providing the base-class of operations. from pandas._config.config import option_context from pandas._libs import Timestamp, groupby as libgroupby -import pandas.compat as compat from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -876,7 +875,7 @@ def _python_agg_general(self, func, *args, **kwargs): if self.grouper._filter_empty_groups: mask = counts.ravel() > 0 - for name, result in compat.iteritems(output): + for name, result in output.items(): # since we are masking, make sure that we have a float object values = result diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 98647a6895574..4848170eaea43 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -11,7 +11,6 @@ from pandas._libs.lib import is_datetime_array from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp from pandas._libs.tslibs.timezones import tz_compare -import pandas.compat as compat from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -535,7 +534,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): # we actually set this value too. 
result._index_data = values result.name = name - for k, v in compat.iteritems(kwargs): + for k, v in kwargs.items(): setattr(result, k, v) return result._reset_identity() @@ -1754,7 +1753,7 @@ def __setstate__(self, state): if isinstance(state, dict): self._data = state.pop('data') - for k, v in compat.iteritems(state): + for k, v in state.items(): setattr(self, k, v) elif isinstance(state, tuple): @@ -4486,7 +4485,7 @@ def groupby(self, values): result = values._reverse_indexer() # map to the label - result = {k: self.take(v) for k, v in compat.iteritems(result)} + result = {k: self.take(v) for k, v in result.items()} return result diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 4493136e3e61e..930b2a4a5161f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -238,7 +238,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): values = cls._create_categorical(values, dtype=dtype) result._data = values result.name = name - for k, v in compat.iteritems(kwargs): + for k, v in kwargs.items(): setattr(result, k, v) result._reset_identity() diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index da6a917c93ba4..52899ea311e9b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -156,7 +156,7 @@ def _simple_new(cls, start, stop=None, step=None, name=None, result._stop = stop or 0 result._step = step or 1 result.name = name - for k, v in compat.iteritems(kwargs): + for k, v in kwargs.items(): setattr(result, k, v) result._reset_identity() diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index cfc42d26c5471..6c08cacb551df 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -103,7 +103,7 @@ def __call__(self, alt): @functools.wraps(alt) def f(values, axis=None, skipna=True, **kwds): if len(self.kwargs) > 0: - for k, v in compat.iteritems(self.kwargs): + for k, v in self.kwargs.items(): if k not in kwds: kwds[k] = v try: diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 542b1075313bf..3fb14c5d2ad9a 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -200,13 +200,13 @@ def _init_dict(self, data, axes, dtype=None): if haxis is not None: haxis = ensure_index(haxis) data = OrderedDict((k, v) - for k, v in compat.iteritems(data) + for k, v in data.items() if k in haxis) else: keys = com.dict_keys_to_ordered_list(data) haxis = Index(keys) - for k, v in compat.iteritems(data): + for k, v in data.items(): if isinstance(v, dict): data[k] = self._constructor_sliced(v) @@ -266,8 +266,8 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): orient = orient.lower() if orient == 'minor': new_data = defaultdict(OrderedDict) - for col, df in compat.iteritems(data): - for item, s in compat.iteritems(df): + for col, df in data.items(): + for item, s in df.items(): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover @@ -1500,7 +1500,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): result = OrderedDict() adj_frames = OrderedDict() - for k, v in compat.iteritems(frames): + for k, v in frames.items(): if isinstance(v, dict): adj_frames[k] = self._constructor_sliced(v) else: @@ -1512,7 +1512,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): reindex_dict = {self._AXIS_SLICEMAP[a]: axes_dict[a] for a in axes} reindex_dict['copy'] = False - for key, frame in compat.iteritems(adj_frames): + for key, frame in adj_frames.items(): if frame is not None: result[key] = 
frame.reindex(**reindex_dict) else: diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 99224f6fb7c5b..65b28a7ecc849 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.dtypes.missing import notna -from pandas import compat from pandas.core.arrays import Categorical from pandas.core.frame import _shared_docs from pandas.core.indexes.base import Index @@ -173,7 +172,7 @@ def lreshape(data, groups, dropna=True, label=None): for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): - mdata = {k: v[mask] for k, v in compat.iteritems(mdata)} + mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols) diff --git a/pandas/core/series.py b/pandas/core/series.py index 8a22765d85aec..716ccb0201fea 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -12,7 +12,6 @@ from pandas._config import get_option from pandas._libs import iNaT, index as libindex, lib, tslibs -import pandas.compat as compat from pandas.compat import PY36 from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, deprecate @@ -291,7 +290,7 @@ def _init_dict(self, data, index=None, dtype=None): # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: - keys, values = zip(*compat.iteritems(data)) + keys, values = zip(*data.items()) values = list(values) elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar @@ -1523,7 +1522,7 @@ def to_dict(self, into=dict): """ # GH16122 into_c = com.standardize_mapping(into) - return into_c(compat.iteritems(self)) + return into_c(self.items()) def to_frame(self, name=None): """ diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 08729442e701f..0ae371d8c8c77 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -7,7 +7,6 @@ import numpy as np from pandas._libs.sparse import BlockIndex, get_blocks -import pandas.compat as compat from pandas.compat import lmap from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender @@ -145,7 +144,7 @@ def _init_dict(self, data, index, columns, dtype=None): # pre-filter out columns if we passed it if columns is not None: columns = ensure_index(columns) - data = {k: v for k, v in compat.iteritems(data) if k in columns} + data = {k: v for k, v in data.items() if k in columns} else: keys = com.dict_keys_to_ordered_list(data) columns = Index(keys) @@ -158,7 +157,7 @@ def sp_maker(x): fill_value=self._default_fill_value, copy=True, dtype=dtype) sdict = {} - for k, v in compat.iteritems(data): + for k, v in data.items(): if isinstance(v, Series): # Force alignment, no copy necessary if not v.index.equals(index): @@ -322,7 +321,7 @@ def _unpickle_sparse_frame_compat(self, state): index = idx series_dict = DataFrame() - for col, (sp_index, sp_values) in compat.iteritems(series): + for col, (sp_index, sp_values) in series.items(): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) @@ -338,7 +337,7 @@ def to_dense(self): ------- df : DataFrame """ - data = {k: v.to_dense() for k, v in compat.iteritems(self)} + data = {k: v.to_dense() for k, v in self.items()} return DataFrame(data, index=self.index, columns=self.columns) def _apply_columns(self, func): @@ -347,7 +346,7 @@ def _apply_columns(self, func): 
""" new_data = {col: func(series) - for col, series in compat.iteritems(self)} + for col, series in self.items()} return self._constructor( data=new_data, index=self.index, columns=self.columns, @@ -380,7 +379,7 @@ def density(self): represented in the frame """ tot_nonsparse = sum(ser.sp_index.npoints - for _, ser in compat.iteritems(self)) + for _, ser in self.items()) tot = len(self.index) * len(self.columns) return tot_nonsparse / float(tot) @@ -599,7 +598,7 @@ def _combine_match_index(self, other, func, level=None): this, other = self.align(other, join='outer', axis=0, level=level, copy=False) - for col, series in compat.iteritems(this): + for col, series in this.items(): new_data[col] = func(series.values, other.values) fill_value = self._get_op_result_fill_value(other, func) @@ -723,7 +722,7 @@ def _reindex_columns(self, columns, method, copy, level, fill_value=None, raise NotImplementedError("'method' argument is not supported") # TODO: fill value handling - sdict = {k: v for k, v in compat.iteritems(self) if k in columns} + sdict = {k: v for k, v in self.items() if k in columns} return self._constructor( sdict, index=self.index, columns=columns, default_fill_value=self._default_fill_value).__finalize__(self) @@ -739,7 +738,7 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, fill_value = np.nan reindexers = {self._get_axis_number(a): val - for (a, val) in compat.iteritems(reindexers)} + for (a, val) in reindexers.items()} index, row_indexer = reindexers.get(0, (None, None)) columns, col_indexer = reindexers.get(1, (None, None)) @@ -917,7 +916,7 @@ def apply(self, func, axis=0, broadcast=None, reduce=None, if isinstance(func, np.ufunc): new_series = {} - for k, v in compat.iteritems(self): + for k, v in self.items(): applied = func(v) applied.fill_value = func(v.fill_value) new_series[k] = applied @@ -969,7 +968,7 @@ def stack_sparse_frame(frame): """ Only makes sense when fill_value is NaN """ - lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)] + lengths = [s.sp_index.npoints for _, s in frame.items()] nobs = sum(lengths) # this is pretty fast @@ -980,7 +979,7 @@ def stack_sparse_frame(frame): # TODO: Figure out whether this can be reached. # I think this currently can't be reached because you can't build a # SparseDataFrame with a non-np.NaN fill value (fails earlier). 
- for _, series in compat.iteritems(frame): + for _, series in frame.items(): if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') @@ -1021,7 +1020,7 @@ def homogenize(series_dict): need_reindex = False - for _, series in compat.iteritems(series_dict): + for _, series in series_dict.items(): if not np.isnan(series.fill_value): raise TypeError('this method is only valid with NaN fill values') @@ -1033,7 +1032,7 @@ def homogenize(series_dict): if need_reindex: output = {} - for name, series in compat.iteritems(series_dict): + for name, series in series_dict.items(): if not series.sp_index.equals(index): series = series.sparse_reindex(index) diff --git a/pandas/io/html.py b/pandas/io/html.py index 1d588632b69f8..e449bf223ba94 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -9,7 +9,7 @@ import os import re -from pandas.compat import iteritems, lmap, lrange, raise_with_traceback +from pandas.compat import lmap, lrange, raise_with_traceback from pandas.errors import AbstractMethodError, EmptyDataError from pandas.core.dtypes.common import is_list_like @@ -617,7 +617,7 @@ def _build_xpath_expr(attrs): if 'class_' in attrs: attrs['class'] = attrs.pop('class_') - s = ["@{key}={val!r}".format(key=k, val=v) for k, v in iteritems(attrs)] + s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()] return '[{expr}]'.format(expr=' and '.join(s)) @@ -769,7 +769,7 @@ def _expand_elements(body): not_max = lens[lens != lens_max] empty = [''] - for ind, length in iteritems(not_max): + for ind, length in not_max.items(): body[ind] += empty * (lens_max - length) diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 28cc768ba4e21..8a9533991fada 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -12,7 +12,7 @@ from pandas.core.dtypes.common import is_period_dtype -from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime +from pandas import DataFrame, MultiIndex, Series, isna, to_datetime from pandas.core.reshape.concat import concat from pandas.io.common import ( @@ -822,8 +822,8 @@ def _parse_no_numpy(self): json = self.json orient = self.orient if orient == "split": - decoded = {str(k): v for k, v in compat.iteritems( - loads(json, precise_float=self.precise_float))} + decoded = {str(k): v for k, v in loads( + json, precise_float=self.precise_float).items()} self.check_keys_split(decoded) self.obj = Series(dtype=None, **decoded) else: @@ -837,7 +837,7 @@ def _parse_numpy(self): if orient == "split": decoded = loads(json, dtype=None, numpy=True, precise_float=self.precise_float) - decoded = {str(k): v for k, v in compat.iteritems(decoded)} + decoded = {str(k): v for k, v in decoded.items()} self.check_keys_split(decoded) self.obj = Series(**decoded) elif orient == "columns" or orient == "index": @@ -875,7 +875,7 @@ def _parse_numpy(self): elif orient == "split": decoded = loads(json, dtype=None, numpy=True, precise_float=self.precise_float) - decoded = {str(k): v for k, v in compat.iteritems(decoded)} + decoded = {str(k): v for k, v in decoded.items()} self.check_keys_split(decoded) self.obj = DataFrame(**decoded) elif orient == "values": @@ -895,8 +895,8 @@ def _parse_no_numpy(self): self.obj = DataFrame( loads(json, precise_float=self.precise_float), dtype=None) elif orient == "split": - decoded = {str(k): v for k, v in compat.iteritems( - loads(json, precise_float=self.precise_float))} + decoded = {str(k): v for k, v in loads( + json, precise_float=self.precise_float).items()} 
self.check_keys_split(decoded) self.obj = DataFrame(dtype=None, **decoded) elif orient == "index": diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index a836faec2b04f..26bf6a8cf410d 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -273,7 +273,7 @@ def _recursive_extract(data, path, seen_meta, level=0): columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem - for k, v in compat.iteritems(meta_vals): + for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k diff --git a/pandas/io/packers.py b/pandas/io/packers.py index cff0f0e4b34d0..ac9b132b191b6 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -462,7 +462,7 @@ def encode(obj): # for f in ['default_fill_value', 'default_kind']: # d[f] = getattr(obj, f, None) # d['data'] = dict([(name, ss) - # for name, ss in compat.iteritems(obj)]) + # for name, ss in obj.items()]) # return d else: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5d73b377838b6..5ad6eb009b6ee 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -912,7 +912,7 @@ def _get_options_with_defaults(self, engine): options = {} - for argname, default in compat.iteritems(_parser_defaults): + for argname, default in _parser_defaults.items(): value = kwds.get(argname, default) # see gh-12935 @@ -922,7 +922,7 @@ def _get_options_with_defaults(self, engine): else: options[argname] = value - for argname, default in compat.iteritems(_c_parser_defaults): + for argname, default in _c_parser_defaults.items(): if argname in kwds: value = kwds[argname] @@ -941,7 +941,7 @@ def _get_options_with_defaults(self, engine): options[argname] = value if engine == 'python-fwf': - for argname, default in compat.iteritems(_fwf_defaults): + for argname, default in _fwf_defaults.items(): options[argname] = kwds.get(argname, default) return options @@ -1657,7 +1657,7 @@ def _agg_index(self, index, try_parse_dates=True): def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None): result = {} - for c, values in compat.iteritems(dct): + for c, values in dct.items(): conv_f = None if converters is None else converters.get(c, None) if isinstance(dtypes, dict): cast_type = dtypes.get(c, None) @@ -2471,7 +2471,7 @@ def _convert_data(self, data): def _clean_mapping(mapping): "converts col numbers to names" clean = {} - for col, v in compat.iteritems(mapping): + for col, v in mapping.items(): if isinstance(col, int) and col not in self.orig_names: col = self.orig_names[col] clean[col] = v @@ -3258,7 +3258,7 @@ def _isindex(colspec): elif isinstance(parse_spec, dict): # dict of new name to column list - for new_name, colspec in compat.iteritems(parse_spec): + for new_name, colspec in parse_spec.items(): if new_name in data_dict: raise ValueError( 'Date column {name} already in dict'.format(name=new_name)) @@ -3316,7 +3316,7 @@ def _clean_na_values(na_values, keep_default_na=True): # into array-likes for further use. This is also # where we append the default NaN values, provided # that `keep_default_na=True`. - for k, v in compat.iteritems(old_na_values): + for k, v in old_na_values.items(): if not is_list_like(v): v = [v] @@ -3386,7 +3386,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): dtype = defaultdict(lambda: np.object) # Convert column indexes to column names. 
- for k, v in compat.iteritems(_dtype): + for k, v in _dtype.items(): col = columns[k] if is_integer(k) else k dtype[col] = v diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 2dedeaf0a4cda..2cfc1bc5eac2e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -29,8 +29,8 @@ from pandas import ( DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, PeriodIndex, - Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat, concat, - isna, to_datetime) + Series, SparseDataFrame, SparseSeries, TimedeltaIndex, concat, isna, + to_datetime) from pandas.core.arrays.categorical import Categorical from pandas.core.arrays.sparse import BlockIndex, IntIndex from pandas.core.base import StringMixin @@ -2448,7 +2448,7 @@ class GenericFixed(Fixed): """ a generified fixed version """ _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'} - _reverse_index_map = {v: k for k, v in compat.iteritems(_index_type_map)} + _reverse_index_map = {v: k for k, v in _index_type_map.items()} attributes = [] # indexer helpders @@ -2912,7 +2912,7 @@ def read(self, **kwargs): def write(self, obj, **kwargs): """ write it as a collection of individual sparse series """ super(SparseFrameFixed, self).write(obj, **kwargs) - for name, ss in compat.iteritems(obj): + for name, ss in obj.items(): key = 'sparse_series_{name}'.format(name=name) if key not in self.group._v_children: node = self._handle.create_group(self.group, key) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index af23c13063aa3..06560f5d702d6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -9,7 +9,6 @@ from pandas._config import get_option -import pandas.compat as compat from pandas.compat import lrange from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly @@ -1627,7 +1626,7 @@ def _validate_color_args(self): if isinstance(self.color, dict): valid_keys = ['boxes', 'whiskers', 'medians', 'caps'] - for key, values in compat.iteritems(self.color): + for key, values in self.color.items(): if key not in valid_keys: raise ValueError("color dict contains invalid " "key '{0}' " diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py index 5e9a73719f67b..0485ddb0e6f43 100644 --- a/pandas/tests/frame/common.py +++ b/pandas/tests/frame/common.py @@ -3,7 +3,6 @@ from pandas.util._decorators import cache_readonly import pandas as pd -from pandas import compat import pandas.util.testing as tm _seriesd = tm.getSeriesData() @@ -11,8 +10,7 @@ _frame = pd.DataFrame(_seriesd) _frame2 = pd.DataFrame(_seriesd, columns=['D', 'C', 'B', 'A']) -_intframe = pd.DataFrame({k: v.astype(int) - for k, v in compat.iteritems(_seriesd)}) +_intframe = pd.DataFrame({k: v.astype(int) for k, v in _seriesd.items()}) _tsframe = pd.DataFrame(_tsd) @@ -33,7 +31,7 @@ def frame2(self): @cache_readonly def intframe(self): # force these all to int64 to avoid platform testing issues - return pd.DataFrame({c: s for c, s in compat.iteritems(_intframe)}, + return pd.DataFrame({c: s for c, s in _intframe.items()}, dtype=np.int64) @cache_readonly diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index fbe03325a3ad9..27c0e070c10c2 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas import DataFrame, NaT, compat, date_range +from pandas import DataFrame, NaT, date_range import pandas.util.testing as tm @@ -51,10 +51,9 @@ def int_frame(): Columns 
are ['A', 'B', 'C', 'D'] """ - df = DataFrame({k: v.astype(int) - for k, v in compat.iteritems(tm.getSeriesData())}) + df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()}) # force these all to int64 to avoid platform testing issues - return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64) + return DataFrame({c: s for c, s in df.items()}, dtype=np.int64) @pytest.fixture @@ -101,8 +100,7 @@ def mixed_int_frame(): Columns are ['A', 'B', 'C', 'D']. """ - df = DataFrame({k: v.astype(int) - for k, v in compat.iteritems(tm.getSeriesData())}) + df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()}) df.A = df.A.astype('int32') df.B = np.ones(len(df.B), dtype='uint64') df.C = df.C.astype('uint8') diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 600575b5255d2..4d715d19dccc0 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -193,7 +193,7 @@ def test_nonzero(self, float_frame, float_string_frame): def test_iteritems(self): df = self.klass([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) - for k, v in compat.iteritems(df): + for k, v in df.items(): assert isinstance(v, self.klass._constructor_sliced) def test_items(self): @@ -343,8 +343,8 @@ def test_to_numpy_copy(self): def test_transpose(self, float_frame): frame = float_frame dft = frame.T - for idx, series in compat.iteritems(dft): - for col, value in compat.iteritems(series): + for idx, series in dft.items(): + for col, value in series.items(): if np.isnan(value): assert np.isnan(frame[col][idx]) else: @@ -355,7 +355,7 @@ def test_transpose(self, float_frame): mixed = self.klass(data, index=index) mixed_T = mixed.T - for col, s in compat.iteritems(mixed_T): + for col, s in mixed_T.items(): assert s.dtype == np.object_ def test_swapaxes(self): @@ -398,12 +398,12 @@ def test_repr_with_mi_nat(self, float_string_frame): assert result == expected def test_iteritems_names(self, float_string_frame): - for k, v in compat.iteritems(float_string_frame): + for k, v in float_string_frame.items(): assert v.name == k def test_series_put_names(self, float_string_frame): series = float_string_frame._series - for k, v in compat.iteritems(series): + for k, v in series.items(): assert v.name == k def test_empty_nonzero(self): @@ -459,7 +459,7 @@ def test_deepcopy(self, float_frame): cp = deepcopy(float_frame) series = cp['A'] series[:] = 10 - for idx, value in compat.iteritems(series): + for idx, value in series.items(): assert float_frame['A'][idx] != value def test_transpose_get_view(self, float_frame): diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index af6d4391dca74..b2f531bfea249 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -12,8 +12,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd -from pandas import ( - DataFrame, MultiIndex, Series, Timestamp, compat, date_range, notna) +from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, notna from pandas.conftest import _get_cython_table_params from pandas.core.apply import frame_apply import pandas.util.testing as tm @@ -334,13 +333,13 @@ def test_apply_differently_indexed(self): result0 = df.apply(Series.describe, axis=0) expected0 = DataFrame({i: v.describe() - for i, v in compat.iteritems(df)}, + for i, v in df.items()}, columns=df.columns) assert_frame_equal(result0, expected0) result1 = df.apply(Series.describe, axis=1) expected1 = DataFrame({i: v.describe() - for i, 
v in compat.iteritems(df.T)}, + for i, v in df.T.items()}, columns=df.index).T assert_frame_equal(result1, expected1) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 4df297bcc436e..f06d3d38e0a6d 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -10,8 +10,7 @@ import pandas as pd from pandas import ( - Categorical, DataFrame, Index, MultiIndex, Series, compat, date_range, - isna) + Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna) from pandas.tests.frame.common import TestData import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal @@ -212,7 +211,7 @@ def test_reindex(self): newFrame = self.frame.reindex(self.ts1.index) for col in newFrame.columns: - for idx, val in compat.iteritems(newFrame[col]): + for idx, val in newFrame[col].items(): if idx in self.frame.index: if np.isnan(val): assert np.isnan(self.frame[col][idx]) @@ -221,7 +220,7 @@ def test_reindex(self): else: assert np.isnan(val) - for col, series in compat.iteritems(newFrame): + for col, series in newFrame.items(): assert tm.equalContents(series.index, newFrame.index) emptyFrame = self.frame.reindex(Index([])) assert len(emptyFrame.index) == 0 @@ -230,7 +229,7 @@ def test_reindex(self): nonContigFrame = self.frame.reindex(self.ts1.index[::2]) for col in nonContigFrame.columns: - for idx, val in compat.iteritems(nonContigFrame[col]): + for idx, val in nonContigFrame[col].items(): if idx in self.frame.index: if np.isnan(val): assert np.isnan(self.frame[col][idx]) @@ -239,7 +238,7 @@ def test_reindex(self): else: assert np.isnan(val) - for col, series in compat.iteritems(nonContigFrame): + for col, series in nonContigFrame.items(): assert tm.equalContents(series.index, nonContigFrame.index) # corner cases diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index e8736e514425f..d071e13599e5d 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -17,7 +17,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, Index, MultiIndex, RangeIndex, Series, Timedelta, - Timestamp, compat, date_range, isna) + Timestamp, date_range, isna) from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -462,11 +462,11 @@ def test_constructor_subclass_dict(self): data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)), 'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))} df = DataFrame(data) - refdf = DataFrame({col: dict(compat.iteritems(val)) - for col, val in compat.iteritems(data)}) + refdf = DataFrame({col: dict(val.items()) + for col, val in data.items()}) tm.assert_frame_equal(refdf, df) - data = tm.TestSubDict(compat.iteritems(data)) + data = tm.TestSubDict(data.items()) df = DataFrame(data) tm.assert_frame_equal(refdf, df) @@ -474,7 +474,7 @@ def test_constructor_subclass_dict(self): from collections import defaultdict data = {} self.frame['B'][:10] = np.nan - for k, v in compat.iteritems(self.frame): + for k, v in self.frame.items(): dct = defaultdict(dict) dct.update(v.to_dict()) data[k] = dct @@ -526,7 +526,7 @@ def test_constructor_dict_of_tuples(self): data = {'a': (1, 2, 3), 'b': (4, 5, 6)} result = DataFrame(data) - expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)}) + expected = DataFrame({k: list(v) for k, v in data.items()}) tm.assert_frame_equal(result, expected, check_dtype=False) def 
test_constructor_dict_multiindex(self): @@ -2099,13 +2099,13 @@ def test_from_records_sequencelike(self): tuples = [] columns = [] dtypes = [] - for dtype, b in compat.iteritems(blocks): + for dtype, b in blocks.items(): columns.extend(b.columns) dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns]) for i in range(len(df.index)): tup = [] - for _, b in compat.iteritems(blocks): + for _, b in blocks.items(): tup.extend(b.iloc[i].values) tuples.append(tuple(tup)) @@ -2172,11 +2172,11 @@ def test_from_records_dictlike(self): # from the dict blocks = df._to_dict_of_blocks() columns = [] - for dtype, b in compat.iteritems(blocks): + for dtype, b in blocks.items(): columns.extend(b.columns) - asdict = {x: y for x, y in compat.iteritems(df)} - asdict2 = {x: y.values for x, y in compat.iteritems(df)} + asdict = {x: y for x, y in df.items()} + asdict2 = {x: y.values for x, y in df.items()} # dict of series & dict of ndarrays (have dtype info) results = [] diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index decd9ec304b37..9aad010a899d2 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -8,8 +8,7 @@ import pytz from pandas import ( - CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, compat, - date_range) + CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, date_range) from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -374,20 +373,20 @@ def test_to_dict(self, mapping): # GH16122 recons_data = DataFrame(test_data).to_dict(into=mapping) - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k][k2]) recons_data = DataFrame(test_data).to_dict("l", mapping) - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k][int(k2) - 1]) recons_data = DataFrame(test_data).to_dict("s", mapping) - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k][k2]) recons_data = DataFrame(test_data).to_dict("sp", mapping) @@ -407,8 +406,8 @@ def test_to_dict(self, mapping): # GH10844 recons_data = DataFrame(test_data).to_dict("i") - for k, v in compat.iteritems(test_data): - for k2, v2 in compat.iteritems(v): + for k, v in test_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k2][k]) df = DataFrame(test_data) @@ -416,8 +415,8 @@ def test_to_dict(self, mapping): recons_data = df.to_dict("i") comp_data = test_data.copy() comp_data['duped'] = comp_data[df.columns[0]] - for k, v in compat.iteritems(comp_data): - for k2, v2 in compat.iteritems(v): + for k, v in comp_data.items(): + for k2, v2 in v.items(): assert (v2 == recons_data[k2][k]) @pytest.mark.parametrize('mapping', [list, defaultdict, []]) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index fa8a6ab3c29bd..9a10595a9f7ea 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -11,7 +11,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, Series, Timedelta, Timestamp, - _np_version_under1p14, compat, concat, date_range, option_context) + _np_version_under1p14, concat, date_range, option_context) from pandas.core.arrays import integer_array from pandas.tests.frame.common import TestData 
import pandas.util.testing as tm @@ -388,8 +388,7 @@ def test_select_dtypes_typecodes(self): def test_dtypes_gh8722(self): self.mixed_frame['bool'] = self.mixed_frame['A'] > 0 result = self.mixed_frame.dtypes - expected = Series({k: v.dtype - for k, v in compat.iteritems(self.mixed_frame)}, + expected = Series({k: v.dtype for k, v in self.mixed_frame.items()}, index=result.index) assert_series_equal(result, expected) @@ -431,7 +430,7 @@ def test_astype(self): # mixed casting def _check_cast(df, v): assert (list({s.dtype.name for - _, s in compat.iteritems(df)})[0] == v) + _, s in df.items()})[0] == v) mn = self.all_mixed._get_numeric_data().copy() mn['little_float'] = np.array(12345., dtype='float16') diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 9149b305f5d0d..f58fe85cad258 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -15,7 +15,7 @@ import pandas as pd from pandas import ( Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series, - Timestamp, compat, date_range, isna, notna) + Timestamp, date_range, isna, notna) import pandas.core.common as com from pandas.core.indexing import IndexingError from pandas.tests.frame.common import TestData @@ -34,11 +34,11 @@ def test_getitem(self): assert len(sl.index) == 20 # Column access - for _, series in compat.iteritems(sl): + for _, series in sl.items(): assert len(series.index) == 20 assert tm.equalContents(series.index, sl.index) - for key, _ in compat.iteritems(self.frame._series): + for key, _ in self.frame._series.items(): assert self.frame[key] is not None assert 'random' not in self.frame @@ -2438,7 +2438,7 @@ def test_at_time_between_time_datetimeindex(self): def test_xs(self): idx = self.frame.index[5] xs = self.frame.xs(idx) - for item, value in compat.iteritems(xs): + for item, value in xs.items(): if np.isnan(value): assert np.isnan(self.frame[item][idx]) else: @@ -2595,7 +2595,7 @@ def is_ok(s): s.dtype != 'uint8') return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) - for c, s in compat.iteritems(df))) + for c, s in df.items())) def _check_get(df, cond, check_dtypes=True): other1 = _safe_add(df) @@ -2713,7 +2713,7 @@ def _check_set(df, cond, check_dtypes=True): # dtypes (and confirm upcasts)x if check_dtypes: - for k, v in compat.iteritems(df.dtypes): + for k, v in df.dtypes.items(): if issubclass(v.type, np.integer) and not cond[k].all(): v = np.dtype('float64') assert dfi[k].dtype == v diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index fc991cd17cae8..edce25566e361 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -7,7 +7,7 @@ import pytest import pandas as pd -from pandas import DataFrame, MultiIndex, Series, compat +from pandas import DataFrame, MultiIndex, Series import pandas.core.common as com from pandas.tests.frame.common import _check_mixed_float import pandas.util.testing as tm @@ -383,7 +383,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame, added = float_frame + series - for key, s in compat.iteritems(added): + for key, s in added.items(): assert_series_equal(s, float_frame[key] + series[key]) larger_series = series.to_dict() @@ -391,7 +391,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame, larger_series = Series(larger_series) larger_added = float_frame + larger_series - for key, s in compat.iteritems(float_frame): + for key, s in float_frame.items(): assert_series_equal(larger_added[key], s + 
series[key]) assert 'E' in larger_added assert np.isnan(larger_added['E']).all() @@ -424,7 +424,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame, # and require explicit broadcasting added = datetime_frame.add(ts, axis='index') - for key, col in compat.iteritems(datetime_frame): + for key, col in datetime_frame.items(): result = col + ts assert_series_equal(added[key], result, check_names=False) assert added[key].name == key @@ -465,7 +465,7 @@ def test_combineFunc(self, float_frame, mixed_float_frame): # vs mix result = mixed_float_frame * 2 - for c, s in compat.iteritems(result): + for c, s in result.items(): tm.assert_numpy_array_equal( s.values, mixed_float_frame[c].values * 2) _check_mixed_float(result, dtype=dict(C=None)) diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index f44739e83267f..20479f9a4fcbf 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -10,7 +10,7 @@ from pandas.compat import lrange import pandas as pd -from pandas import DataFrame, Index, Series, Timestamp, compat, date_range +from pandas import DataFrame, Index, Series, Timestamp, date_range from pandas.tests.frame.common import TestData from pandas.util.testing import assert_frame_equal, assert_series_equal @@ -809,8 +809,7 @@ def test_replace_input_formats_listlike(self): df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5], 'C': ['', 'asdf', 'fd']}) filled = df.replace(to_rep, values) - expected = {k: v.replace(to_rep[k], values[k]) - for k, v in compat.iteritems(df)} + expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()} assert_frame_equal(filled, DataFrame(expected)) result = df.replace([0, 2, 5], [5, 2, 0]) @@ -823,8 +822,7 @@ def test_replace_input_formats_listlike(self): df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5], 'C': ['', 'asdf', 'fd']}) filled = df.replace(np.nan, values) - expected = {k: v.replace(np.nan, values[k]) - for k, v in compat.iteritems(df)} + expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()} assert_frame_equal(filled, DataFrame(expected)) # list to list @@ -847,8 +845,7 @@ def test_replace_input_formats_scalar(self): # dict to scalar to_rep = {'A': np.nan, 'B': 0, 'C': ''} filled = df.replace(to_rep, 0) - expected = {k: v.replace(to_rep[k], 0) - for k, v in compat.iteritems(df)} + expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()} assert_frame_equal(filled, DataFrame(expected)) msg = "value argument must be scalar, dict, or Series" diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 0d1575a35a0bc..187fea5403aea 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import ( - DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range, isna) + DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna) import pandas.core.nanops as nanops from pandas.util import testing as tm @@ -392,7 +392,7 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i): "args": [1]}, "count": {"expected": 2}} - for method, data in compat.iteritems(grp_exp): + for method, data in grp_exp.items(): if "args" not in data: data["args"] = [] diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 885def32db046..31b602e38c4ad 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import ( 
- DataFrame, Index, MultiIndex, Panel, Series, Timestamp, compat, date_range, + DataFrame, Index, MultiIndex, Panel, Series, Timestamp, date_range, read_csv) import pandas.core.common as com import pandas.util.testing as tm @@ -403,7 +403,7 @@ def test_frame_groupby(tsframe): groups = grouped.groups indices = grouped.indices - for k, v in compat.iteritems(groups): + for k, v in groups.items(): samething = tsframe.index.take(indices[k]) assert (samething == v).all() @@ -524,7 +524,7 @@ def test_groupby_multiple_columns(df, op): for n2, gp2 in gp1.groupby('B'): expected[n1][n2] = op(gp2.loc[:, ['C', 'D']]) expected = {k: DataFrame(v) - for k, v in compat.iteritems(expected)} + for k, v in expected.items()} expected = Panel.fromDict(expected).swapaxes(0, 1) expected.major_axis.name, expected.minor_axis.name = 'A', 'B' @@ -1275,7 +1275,7 @@ def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values) tups = com.asarray_tuplesafe(tups) expected = f(df.groupby(tups)[field]) - for k, v in compat.iteritems(expected): + for k, v in expected.items(): assert (result[k] == v) _check_groupby(df, result, ['a', 'b'], 'd') diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 8382111ec9901..867cb8365476e 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import ( - CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp, compat, + CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp, date_range) from pandas.core.groupby.grouper import Grouping import pandas.util.testing as tm @@ -671,14 +671,14 @@ def test_groups(self, df): groups = grouped.groups assert groups is grouped.groups # caching works - for k, v in compat.iteritems(grouped.groups): + for k, v in grouped.groups.items(): assert (df.loc[v]['A'] == k).all() grouped = df.groupby(['A', 'B']) groups = grouped.groups assert groups is grouped.groups # caching works - for k, v in compat.iteritems(grouped.groups): + for k, v in grouped.groups.items(): assert (df.loc[v]['A'] == k[0]).all() assert (df.loc[v]['B'] == k[1]).all() diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index be266798973d1..3f0656615545c 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -4,7 +4,6 @@ import pytest from pandas._libs.tslib import iNaT -import pandas.compat as compat from pandas.core.dtypes.dtypes import CategoricalDtype @@ -235,7 +234,7 @@ def test_copy_name(self): # gh-12309: Check that the "name" argument # passed at initialization is honored. 
- for name, index in compat.iteritems(self.indices): + for name, index in self.indices.items(): if isinstance(index, MultiIndex): continue @@ -262,7 +261,7 @@ def test_copy_name(self): def test_ensure_copied_data(self): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 - for name, index in compat.iteritems(self.indices): + for name, index in self.indices.items(): init_kwargs = {} if isinstance(index, PeriodIndex): # Needs "freq" specification: @@ -298,7 +297,7 @@ def test_ensure_copied_data(self): check_same='same') def test_memory_usage(self): - for name, index in compat.iteritems(self.indices): + for name, index in self.indices.items(): result = index.memory_usage() if len(index): index.get_loc(index[0]) @@ -428,7 +427,7 @@ def test_where(self, klass): @pytest.mark.parametrize("method", ["intersection", "union", "difference", "symmetric_difference"]) def test_set_ops_error_cases(self, case, method): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): # non-iterable input msg = "Input must be Index or array-like" @@ -436,7 +435,7 @@ def test_set_ops_error_cases(self, case, method): getattr(idx, method)(case) def test_intersection_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[:5] second = idx[:3] intersect = first.intersection(second) @@ -466,7 +465,7 @@ def test_intersection_base(self): first.intersection([1, 2, 3]) def test_union_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[3:] second = idx[:5] everything = idx @@ -494,7 +493,7 @@ def test_union_base(self): @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[2:] second = idx[:4] answer = idx[4:] @@ -529,7 +528,7 @@ def test_difference_base(self, sort): first.difference([1, 2, 3], sort) def test_symmetric_difference(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): first = idx[1:] second = idx[:-1] if isinstance(idx, CategoricalIndex): @@ -560,7 +559,7 @@ def test_symmetric_difference(self): def test_insert_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): result = idx[1:4] if not len(idx): @@ -571,7 +570,7 @@ def test_insert_base(self): def test_delete_base(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): if not len(idx): continue @@ -596,7 +595,7 @@ def test_delete_base(self): def test_equals(self): - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) @@ -682,7 +681,7 @@ def test_numpy_ufuncs(self): # test ufuncs of numpy, see: # http://docs.scipy.org/doc/numpy/reference/ufuncs.html - for name, idx in compat.iteritems(self.indices): + for name, idx in self.indices.items(): for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index c3b00133228d8..8bdf4d84427ba 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -4,8 +4,6 @@ import pytest 
 import pytz
 
-import pandas.compat as compat
-
 import pandas as pd
 from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
 import pandas.util.testing as tm
@@ -413,7 +411,7 @@ def test_delete(self):
                  -1: expected_4,
                  4: expected_4,
                  1: expected_1}
-        for n, expected in compat.iteritems(cases):
+        for n, expected in cases.items():
             result = idx.delete(n)
             tm.assert_index_equal(result, expected)
             assert result.name == expected.name
@@ -460,7 +458,7 @@ def test_delete_slice(self):
         cases = {(0, 1, 2): expected_0_2,
                  (7, 8, 9): expected_7_9,
                  (3, 4, 5): expected_3_5}
-        for n, expected in compat.iteritems(cases):
+        for n, expected in cases.items():
             result = idx.delete(n)
             tm.assert_index_equal(result, expected)
             assert result.name == expected.name
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index fa08315e13600..ed1028b45f5db 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -22,8 +22,8 @@
 
 import pandas as pd
 from pandas import (
-    DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat,
-    date_range, isna, to_datetime)
+    DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, date_range, isna,
+    to_datetime)
 from pandas.core.arrays import DatetimeArray
 from pandas.core.tools import datetimes as tools
 from pandas.util import testing as tm
@@ -1701,7 +1701,7 @@ def test_parsers_dayfirst_yearfirst(self, cache):
                            (True, True,
                             datetime(2020, 12, 21))]}
 
-        for date_str, values in compat.iteritems(cases):
+        for date_str, values in cases.items():
             for dayfirst, yearfirst, expected in values:
 
                 # odd comparisons across version
@@ -1739,7 +1739,7 @@ def test_parsers_timestring(self, cache):
         cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)),
                  '9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))}
 
-        for date_str, (exp_now, exp_def) in compat.iteritems(cases):
+        for date_str, (exp_now, exp_def) in cases.items():
             result1, _, _ = parsing.parse_time_string(date_str)
             result2 = to_datetime(date_str)
             result3 = to_datetime([date_str])
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index a6264e4dad4f0..7233f53572625 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -4,7 +4,7 @@
 import pytest
 
 import pandas as pd
-from pandas import Index, Timedelta, TimedeltaIndex, compat, timedelta_range
+from pandas import Index, Timedelta, TimedeltaIndex, timedelta_range
 import pandas.util.testing as tm
 
 
@@ -240,7 +240,7 @@ def test_delete(self):
                  -1: expected_4,
                  4: expected_4,
                  1: expected_1}
-        for n, expected in compat.iteritems(cases):
+        for n, expected in cases.items():
             result = idx.delete(n)
             tm.assert_index_equal(result, expected)
             assert result.name == expected.name
@@ -267,7 +267,7 @@ def test_delete_slice(self):
         cases = {(0, 1, 2): expected_0_2,
                  (7, 8, 9): expected_7_9,
                  (3, 4, 5): expected_3_5}
-        for n, expected in compat.iteritems(cases):
+        for n, expected in cases.items():
             result = idx.delete(n)
             tm.assert_index_equal(result, expected)
             assert result.name == expected.name
diff --git a/pandas/tests/io/json/__pycache__/tmp2c7r4efu b/pandas/tests/io/json/__pycache__/tmp2c7r4efu
new file mode 100644
index 0000000000000000000000000000000000000000..d1258abbcdf40e3af6336357c371cbc01bcf942d
Binary files /dev/null and b/pandas/tests/io/json/__pycache__/tmp2c7r4efu differ