@@ -1784,19 +1784,47 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
 
         .. versionadded:: 0.21.0
 
+        This function writes the dataframe as a `parquet file
+        <https://parquet.apache.org/>`_. You can choose different parquet
+        backends, and have the option of compression. See
+        :ref:`the user guide <io.parquet>` for more details.
+
         Parameters
         ----------
         fname : str
-            string file path
+            String file path.
         engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
             Parquet library to use. If 'auto', then the option
             ``io.parquet.engine`` is used. The default ``io.parquet.engine``
             behavior is to try 'pyarrow', falling back to 'fastparquet' if
             'pyarrow' is unavailable.
         compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
             Name of the compression to use. Use ``None`` for no compression.
-        kwargs
-            Additional keyword arguments passed to the engine
+        **kwargs
+            Additional arguments passed to the parquet library. See
+            :ref:`pandas io <io.parquet>` for more details.
+
+        See Also
+        --------
+        read_parquet : Read a parquet file.
+        DataFrame.to_csv : Write a csv file.
+        DataFrame.to_sql : Write to a sql table.
+        DataFrame.to_hdf : Write to hdf.
+
+        Notes
+        -----
+        This function requires either the `fastparquet
+        <https://pypi.python.org/pypi/fastparquet>`_ or `pyarrow
+        <https://arrow.apache.org/docs/python/>`_ library.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
+        >>> df.to_parquet('df.parquet.gzip', compression='gzip')
+        >>> pd.read_parquet('df.parquet.gzip')
+           col1  col2
+        0     1     3
+        1     2     4
         """
         from pandas.io.parquet import to_parquet
         to_parquet(self, fname, engine,
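
The added Examples section only exercises the gzip default path. A couple of further hedged examples of the parameters this diff documents, in the same doctest style; the file name and the explicit engine choice are illustrative, not part of the diff:

    >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
    >>> df.to_parquet('df.parquet.brotli', engine='pyarrow', compression='brotli')
    >>> pd.read_parquet('df.parquet.brotli', engine='pyarrow')
       col1  col2
    0     1     3
    1     2     4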
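For readers of the engine parameter description: the behavior it documents is roughly the sketch below. This is a hypothetical illustration, not the code in this diff; pandas' real resolution lives in pandas/io/parquet.py, and it first consults the ``io.parquet.engine`` option (assumed here to be left at its default, 'auto'). The function name is invented.

    # Hypothetical sketch of the documented 'auto' engine fallback;
    # not pandas' actual implementation.
    import importlib

    def resolve_engine(engine='auto'):
        """Return the parquet engine name that 'auto' would resolve to."""
        if engine != 'auto':
            return engine
        for candidate in ('pyarrow', 'fastparquet'):  # documented fallback order
            try:
                importlib.import_module(candidate)
                return candidate
            except ImportError:
                continue
        raise ImportError("to_parquet requires either pyarrow or fastparquet")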