diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 17cc76e703631..d8a23ad40619b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1088,17 +1088,15 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', lines=lines) def to_hdf(self, path_or_buf, key, **kwargs): - """Activate the HDFStore. + """Write the contained data to an HDF5 file using HDFStore. Parameters ---------- path_or_buf : the path (string) or HDFStore object key : string indentifier for the group in the store - mode : optional, {'a', 'w', 'r', 'r+'}, default 'a' + mode : optional, {'a', 'w', 'r+'}, default 'a' - ``'r'`` - Read-only; no data can be modified. ``'w'`` Write; a new file is created (an existing file with the same name would be deleted). @@ -1116,6 +1114,9 @@ def to_hdf(self, path_or_buf, key, **kwargs): / selecting subsets of the data append : boolean, default False For Table formats, append the input data to the existing + data_columns : list of columns to create as data columns, or True to + use all columns. See + `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa complevel : int, 1-9, default 0 If a complib is specified compression will be applied where possible @@ -1126,7 +1127,6 @@ def to_hdf(self, path_or_buf, key, **kwargs): If applying compression use the fletcher32 checksum dropna : boolean, default False. If true, ALL nan rows will not be written to store. - """ from pandas.io import pytables diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index aa38958f6c92e..9b3cbb635b454 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -859,6 +859,9 @@ def put(self, key, value, format=None, append=False, **kwargs): append : boolean, default False This will force Table format, append the input data to the existing. + data_columns : list of columns to create as data columns, or True to + use all columns. 
See + `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' @@ -936,7 +939,8 @@ def append(self, key, value, format=None, append=True, columns=None, append : boolean, default True, append the input data to the existing data_columns : list of columns to create as data columns, or True to - use all columns + use all columns. See + `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa min_itemsize : dict of columns that specify minimum string sizes nan_rep : string to use as string nan represenation chunksize : size to chunk the writing @@ -944,6 +948,7 @@ def append(self, key, value, format=None, append=True, columns=None, encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' + Notes ----- Does *not* check if data being appended overlaps with existing