diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a66d00fff9714..8151d6ca3b193 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1697,10 +1697,15 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
.. versionadded:: 0.21.0
+ This function writes the dataframe as a `parquet file
+ <https://parquet.apache.org/>`_. You can choose different parquet
+ backends, and have the option of compression. See
+ :ref:`the user guide <io.parquet>` for more details.
+
Parameters
----------
fname : str
- string file path
+ String file path.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
@@ -1708,8 +1713,31 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
- kwargs
- Additional keyword arguments passed to the engine
+ **kwargs
+ Additional arguments passed to the parquet library. See
+ :ref:`pandas io <io.parquet>` for more details.
+
+ See Also
+ --------
+ read_parquet : Read a parquet file.
+ DataFrame.to_csv : Write a csv file.
+ DataFrame.to_sql : Write to a sql table.
+ DataFrame.to_hdf : Write to hdf.
+
+ Notes
+ -----
+ This function requires either the `fastparquet
+ <https://github.com/dask/fastparquet>`_ or `pyarrow
+ <https://arrow.apache.org/docs/python/>`_ library.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
+ >>> df.to_parquet('df.parquet.gzip', compression='gzip')
+ >>> pd.read_parquet('df.parquet.gzip')
+    col1  col2
+ 0     1     3
+ 1     2     4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
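
For reference, a standalone sketch (not part of the diff) exercising the documented API end to end. It assumes one of the two engines named in the docstring, pyarrow or fastparquet, is installed; with engine='auto' pandas tries pyarrow first and falls back to fastparquet.

    import os
    import tempfile

    import pandas as pd

    # Build the same small frame used in the docstring's Examples section.
    df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})

    # Write to a temporary path; any extra keyword arguments (**kwargs)
    # would be forwarded unchanged to the underlying parquet library.
    path = os.path.join(tempfile.mkdtemp(), 'df.parquet.gzip')
    df.to_parquet(path, engine='auto', compression='gzip')

    # Round-trip: reading the file back recovers the original frame.
    print(pd.read_parquet(path))
    #    col1  col2
    # 0     1     3
    # 1     2     4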