
ENH: Read from compressed data sources #11677


Closed
wants to merge 5 commits into from

Changes from 2 commits
2 changes: 1 addition & 1 deletion doc/source/whatsnew/v0.18.0.txt
@@ -31,7 +31,7 @@ New features
 Other enhancements
 ^^^^^^^^^^^^^^^^^^

+- `read_pickle` can now unpickle from compressed files (:issue:`<num>`).

Contributor: add in the actual number here (11666)
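
A short usage sketch of the documented enhancement (hypothetical file name; assumes the 'infer' default added to read_pickle in this PR, which detects .gz and .bz2 extensions):

import gzip
import pickle

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})

# write a gzip-compressed pickle by hand; this PR only changes the read side
with gzip.open('frame.pkl.gz', 'wb') as f:
    pickle.dump(df, f)

# the compression is then inferred from the .gz extension on read
df2 = pd.read_pickle('frame.pkl.gz')
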
40 changes: 30 additions & 10 deletions pandas/io/parsers.py
@@ -234,18 +234,28 @@ class ParserWarning(Warning):
 fields if it is not spaces (e.g., '~').
 """ % (_parser_params % _fwf_widths)

-def _read(filepath_or_buffer, kwds):
-    "Generic reader of line files."
-    encoding = kwds.get('encoding', None)
-    skipfooter = kwds.pop('skipfooter', None)
-    if skipfooter is not None:
-        kwds['skip_footer'] = skipfooter
-
+def get_compression(filepath_or_buffer, encoding, compression_kwd):
+    """
+    Determine the compression type of a file or buffer.

Contributor: move this entire function to pandas.io.common; call it maybe get_compression_type.

This should be a bit more lightweight; as you can see, there are already other functions which use the compression. This will simply infer from a keyword and/or file extension and return the type of compression, which can then be passed to other routines.
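
A minimal sketch of the suggested helper, assuming only the keyword/extension inference moves into pandas.io.common (the name get_compression_type and this signature are the reviewer's suggestion plus guesswork, not code from this PR):

from pandas import compat  # py2/3 string-type shim, as used in parsers.py

def get_compression_type(filepath_or_buffer, compression):
    """Infer the compression type from a keyword and/or file extension."""
    if compression != 'infer':
        # an explicit keyword ('gzip', 'bz2', or None) wins outright
        return compression
    if isinstance(filepath_or_buffer, compat.string_types):
        if filepath_or_buffer.endswith('.gz'):
            return 'gzip'
        if filepath_or_buffer.endswith('.bz2'):
            return 'bz2'
    # not a string path, or no recognizable extension
    return None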


+
+    Parameters
+    ----------
+    filepath_or_buffer : string
+        File path
+    encoding : string
+        Encoding type
+    compression_kwd : {'gzip', 'bz2', 'infer', None}
+        Compression type ('infer' looks for the file extensions .gz and
+        .bz2, using gzip and bz2 to decompress respectively).
+
+    Returns
+    -------
+    compression : {'gzip', 'bz2', None} depending on result
+    """
     # If the input could be a filename, check for a recognizable compression extension.
     # If we're reading from a URL, the `get_filepath_or_buffer` will use header info
     # to determine compression, so use what it finds in that case.
-    inferred_compression = kwds.get('compression')
+    inferred_compression = compression_kwd
     if inferred_compression == 'infer':
         if isinstance(filepath_or_buffer, compat.string_types):
             if filepath_or_buffer.endswith('.gz'):
@@ -259,8 +269,18 @@ def _read(filepath_or_buffer, kwds):

     filepath_or_buffer, _, compression = get_filepath_or_buffer(filepath_or_buffer,
                                                                 encoding,
-                                                                compression=kwds.get('compression', None))
-    kwds['compression'] = inferred_compression if compression == 'infer' else compression
+                                                                compression=compression_kwd)
+    return inferred_compression if compression == 'infer' else compression


+def _read(filepath_or_buffer, kwds):
+    "Generic reader of line files."
+    encoding = kwds.get('encoding', None)
+    skipfooter = kwds.pop('skipfooter', None)
+    if skipfooter is not None:
+        kwds['skip_footer'] = skipfooter
+
+    kwds['compression'] = get_compression(filepath_or_buffer, encoding, kwds['compression'])

     if kwds.get('date_parser', None) is not None:
         if isinstance(kwds['parse_dates'], bool):
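
To make the inference path concrete, a usage sketch (file names are hypothetical; assumes the 'infer' handling shown in the diff above):

import pandas as pd

df = pd.read_csv('data.csv.gz', compression='infer')   # .gz  -> gzip
df = pd.read_csv('data.csv.bz2', compression='infer')  # .bz2 -> bz2

# an explicit keyword skips extension-based inference entirely
df = pd.read_csv('data.csv.bz2', compression='bz2')
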
14 changes: 10 additions & 4 deletions pandas/io/pickle.py
@@ -1,4 +1,6 @@
 from pandas.compat import cPickle as pkl, pickle_compat as pc, PY3
+from pandas.io.common import _get_handle
+from pandas.io.parsers import get_compression

 def to_pickle(obj, path):
     """
@@ -14,7 +16,7 @@ def to_pickle(obj, path):
         pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)


-def read_pickle(path):
+def read_pickle(path, compression_arg='infer'):

Contributor: should be named compression

"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Expand All @@ -26,6 +28,9 @@ def read_pickle(path):
----------
path : string
File path
compression_arg: {'gzip', 'bz2', 'infer', None}, default 'infer'
Compression type, ('infer' looks for the file extensions .gz and .bz2, using gzip and bz2 to decompress
respectively).

     Returns
     -------
@@ -41,19 +46,20 @@ def try_read(path, encoding=None):

         # cpickle
         # GH 6899
+        compression = get_compression(path, encoding, compression_arg)
         try:
-            with open(path, 'rb') as fh:
+            with _get_handle(path, 'rb', encoding, compression) as fh:
                 return pkl.load(fh)
         except (Exception) as e:

             # reg/patched pickle
             try:
-                with open(path, 'rb') as fh:
+                with _get_handle(path, 'rb', encoding, compression) as fh:
                     return pc.load(fh, encoding=encoding, compat=False)

             # compat pickle
             except:
-                with open(path, 'rb') as fh:
+                with _get_handle(path, 'rb', encoding, compression) as fh:
                     return pc.load(fh, encoding=encoding, compat=True)

     try:
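
For reference, a minimal sketch of what _get_handle is relied on to do here (a hypothetical simplification; the real pandas.io.common._get_handle also deals with encodings and text mode):

import bz2
import gzip

def _get_handle_sketch(path, mode, encoding=None, compression=None):
    # open with the matching decompressor, or plain open() if uncompressed
    if compression == 'gzip':
        return gzip.open(path, mode)
    if compression == 'bz2':
        return bz2.BZ2File(path, mode)
    return open(path, mode)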