Remove read_table deprecation #27102

Merged: 4 commits, Jun 28, 2019
1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.25.0.rst
@@ -872,6 +872,7 @@ Other
- Allow :class:`Index` and :class:`RangeIndex` to be passed to numpy ``min`` and ``max`` functions (:issue:`26125`)
- Use actual class name in repr of empty objects of a ``Series`` subclass (:issue:`27001`).
- Bug in :class:`DataFrame` where passing an object array of timezone-aware `datetime` objects would incorrectly raise ``ValueError`` (:issue:`13287`)
- :func:`read_table` has been undeprecated (:issue:`25220`)
Member commented:
Maybe add a note in the v0.24.0 docs as well that the deprecation was removed in 0.25.0 for people reading that?

Member commented:
Also, not sure where it fits best, but I don't think the bugs section is where to put it. Maybe in the deprecations section, since it is most related to that?

Member (author) commented:
Good idea. I edited the whatsnew entry in v0.24.0 for the read_table deprecation to mention it was undeprecated in 0.25.0.


.. _whatsnew_0.250.contributors:

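For reference, a minimal sketch of the user-facing effect of this change (it mirrors the new test added further down in this PR; variable names here are just illustrative):

    import io
    import pandas as pd

    data = "a\tb\n1\t2\n3\t4"

    # After this change, read_table no longer emits a FutureWarning and is
    # simply read_csv with a tab as the default separator.
    result = pd.read_table(io.StringIO(data))
    expected = pd.read_csv(io.StringIO(data), sep="\t")

    pd.testing.assert_frame_equal(result, expected)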
26 changes: 2 additions & 24 deletions pandas/io/parsers.py
@@ -540,14 +540,8 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):

def _make_parser_function(name, default_sep=','):

# prepare read_table deprecation
if name == "read_table":
sep = False
else:
sep = default_sep

def parser_f(filepath_or_buffer: FilePathOrBuffer,
sep=sep,
sep=default_sep,
delimiter=None,

# Column and Index Locations and Names
Expand Down Expand Up @@ -613,19 +607,6 @@ def parser_f(filepath_or_buffer: FilePathOrBuffer,
memory_map=False,
float_precision=None):

# deprecate read_table GH21948
if name == "read_table":
if sep is False and delimiter is None:
warnings.warn("read_table is deprecated, use read_csv "
"instead, passing sep='\\t'.",
FutureWarning, stacklevel=2)
else:
warnings.warn("read_table is deprecated, use read_csv "
"instead.",
FutureWarning, stacklevel=2)
if sep is False:
sep = default_sep

# gh-23761
#
# When a dialect is passed, it overrides any of the overlapping
@@ -732,10 +713,7 @@ def parser_f(filepath_or_buffer: FilePathOrBuffer,
read_table = _make_parser_function('read_table', default_sep='\t')
read_table = Appender(_doc_read_csv_and_table.format(
func_name='read_table',
summary="""Read general delimited file into DataFrame.

.. deprecated:: 0.24.0
Use :func:`pandas.read_csv` instead, passing ``sep='\\t'`` if necessary.""",
summary='Read general delimited file into DataFrame.',
_default_sep=r"'\\t' (tab-stop)")
)(read_table)

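As a simplified illustration of what remains after this removal (a toy sketch, not the actual pandas internals — the helper names and the stub body below are made up), the factory bakes the default separator into the closure, so read_table is the same parser function with default_sep="\t" and no warning path:

    # Toy sketch of the remaining factory pattern; pandas' internal _read()
    # and its full keyword handling are replaced by a stub that just reports
    # the effective separator.
    def make_parser_function(name, default_sep=","):

        def parser_f(filepath_or_buffer, sep=default_sep, delimiter=None, **kwds):
            effective_sep = delimiter if delimiter is not None else sep
            return {"source": filepath_or_buffer, "sep": effective_sep, **kwds}

        parser_f.__name__ = name
        return parser_f

    read_table_demo = make_parser_function("read_table", default_sep="\t")
    print(read_table_demo("data.tsv"))  # {'source': 'data.tsv', 'sep': '\t'}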
10 changes: 4 additions & 6 deletions pandas/tests/io/parser/test_common.py
@@ -1917,16 +1917,14 @@ def test_read_csv_memory_growth_chunksize(all_parsers):
pass


def test_read_table_deprecated(all_parsers):
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")

with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)


def test_first_row_bom(all_parsers):
27 changes: 2 additions & 25 deletions pandas/tests/io/test_common.py
@@ -160,6 +160,7 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):

@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_table, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
@@ -191,18 +192,9 @@ def test_read_expands_user_home_dir(self, reader, module,
msg1, msg2, msg3, msg4, msg5)):
reader(path)

def test_read_non_existant_read_table(self):
path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv')
msg1 = r"File b'.+does_not_exist\.csv' does not exist"
msg2 = (r"\[Errno 2\] File .+does_not_exist\.csv does not exist:"
r" '.+does_not_exist\.csv'")
with pytest.raises(FileNotFoundError, match=r"({}|{})".format(
msg1, msg2)):
with tm.assert_produces_warning(FutureWarning):
pd.read_table(path)

@pytest.mark.parametrize('reader, module, path', [
(pd.read_csv, 'os', ('io', 'data', 'iris.csv')),
(pd.read_table, 'os', ('io', 'data', 'iris.csv')),
(pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')),
(pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')),
@@ -228,21 +220,6 @@ def test_read_fspath_all(self, reader, module, path, datapath):
else:
tm.assert_frame_equal(result, expected)

def test_read_fspath_all_read_table(self, datapath):
path = datapath('io', 'data', 'iris.csv')

mypath = CustomFSPath(path)
with tm.assert_produces_warning(FutureWarning):
result = pd.read_table(mypath)
with tm.assert_produces_warning(FutureWarning):
expected = pd.read_table(path)

if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)

@pytest.mark.parametrize('writer_name, writer_kwargs, module', [
('to_csv', {}, 'os'),
('to_excel', {'engine': 'xlwt'}, 'xlwt'),