@@ -118,7 +118,7 @@
 )
 from pandas.core.dtypes.missing import isna, notna
 
-from pandas.core import algorithms, common as com, nanops, ops
+from pandas.core import algorithms, common as com, generic, nanops, ops
 from pandas.core.accessor import CachedAccessor
 from pandas.core.aggregation import (
     aggregate,
@@ -2066,6 +2066,7 @@ def _from_arrays(
         )
         return cls(mgr)
 
+    @doc(storage_options=generic._shared_docs["storage_options"])
     @deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
     def to_stata(
         self,
@@ -2118,7 +2119,7 @@ def to_stata(
         variable_labels : dict
             Dictionary containing columns as keys and variable labels as
             values. Each label must be 80 characters or smaller.
-        version : {114, 117, 118, 119, None}, default 114
+        version : {{114, 117, 118, 119, None}}, default 114
             Version to use in the output dta file. Set to None to let pandas
             decide between 118 or 119 formats depending on the number of
             columns in the frame. Version 114 can be read by Stata 10 and
@@ -2147,23 +2148,17 @@ def to_stata(
         compression : str or dict, default 'infer'
             For on-the-fly compression of the output dta. If string, specifies
             compression mode. If dict, value at key 'method' specifies
-            compression mode. Compression mode must be one of {'infer', 'gzip',
-            'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and
+            compression mode. Compression mode must be one of {{'infer', 'gzip',
+            'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and
             `fname` is path-like, then detect compression from the following
             extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
-            compression). If dict and compression mode is one of {'zip',
-            'gzip', 'bz2'}, or inferred as one of the above, other entries
+            compression). If dict and compression mode is one of {{'zip',
+            'gzip', 'bz2'}}, or inferred as one of the above, other entries
             passed as additional compression options.
 
             .. versionadded:: 1.1.0
 
-        storage_options : dict, optional
-            Extra options that make sense for a particular storage connection, e.g.
-            host, port, username, password, etc., if using a URL that will
-            be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
-            will be raised if providing this argument with a local path or
-            a file-like buffer. See the fsspec and backend storage implementation
-            docs for the set of allowed keys and values.
+        {storage_options}
 
             .. versionadded:: 1.2.0
 
@@ -2186,9 +2181,9 @@ def to_stata(
 
         Examples
         --------
-        >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
+        >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
         ...                               'parrot'],
-        ...                    'speed': [350, 18, 361, 15]})
+        ...                    'speed': [350, 18, 361, 15]}})
         >>> df.to_stata('animals.dta')  # doctest: +SKIP
         """
         if version not in (114, 117, 118, 119, None):
@@ -2255,6 +2250,7 @@ def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
     @doc(
         Series.to_markdown,
         klass=_shared_doc_kwargs["klass"],
+        storage_options=_shared_docs["storage_options"],
         examples="""Examples
         --------
         >>> df = pd.DataFrame(
@@ -2307,6 +2303,7 @@ def to_markdown(
             handles.handle.writelines(result)
         return None
 
+    @doc(storage_options=generic._shared_docs["storage_options"])
     @deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
     def to_parquet(
         self,
@@ -2340,12 +2337,12 @@ def to_parquet(
 
             Previously this was "fname"
 
-        engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
+        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
             Parquet library to use. If 'auto', then the option
             ``io.parquet.engine`` is used. The default ``io.parquet.engine``
             behavior is to try 'pyarrow', falling back to 'fastparquet' if
             'pyarrow' is unavailable.
-        compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
+        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
             Name of the compression to use. Use ``None`` for no compression.
         index : bool, default None
             If ``True``, include the dataframe's index(es) in the file output.
@@ -2365,13 +2362,7 @@ def to_parquet(
 
             .. versionadded:: 0.24.0
 
-        storage_options : dict, optional
-            Extra options that make sense for a particular storage connection, e.g.
-            host, port, username, password, etc., if using a URL that will
-            be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
-            will be raised if providing this argument with a local path or
-            a file-like buffer. See the fsspec and backend storage implementation
-            docs for the set of allowed keys and values.
+        {storage_options}
 
             .. versionadded:: 1.2.0
 
@@ -2398,7 +2389,7 @@ def to_parquet(
 
         Examples
         --------
-        >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
+        >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
         >>> df.to_parquet('df.parquet.gzip',
         ...               compression='gzip')  # doctest: +SKIP
         >>> pd.read_parquet('df.parquet.gzip')  # doctest: +SKIP
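
The changes above replace copy-pasted `storage_options` docstring text with a `{storage_options}` placeholder that the `@doc` decorator fills in from a shared fragment; this is also why every literal brace in the affected docstrings must now be doubled (`{...}` becomes `{{...}}`), since the substitution is `str.format`-style. A minimal sketch of that mechanism is below; it is not pandas' actual implementation, and the names `shared_docs`, `doc`, and `to_stata` here are illustrative stand-ins:

```python
# Illustrative sketch of str.format-based docstring templating (hypothetical
# names; not pandas' real `pandas.util._decorators.doc`).
from typing import Callable

shared_docs = {
    "storage_options": (
        "storage_options : dict, optional\n"
        "        Extra options for a particular storage connection, e.g.\n"
        "        host, port, username, password, for URLs parsed by fsspec."
    )
}

def doc(**params) -> Callable:
    """Return a decorator that formats the wrapped function's docstring."""
    def decorator(func: Callable) -> Callable:
        if func.__doc__:
            # str.format substitution: this is why literal braces in the
            # docstring must be escaped as {{ }} once the decorator is
            # applied, matching the {'infer', ...} -> {{'infer', ...}}
            # edits in the diff above.
            func.__doc__ = func.__doc__.format(**params)
        return func
    return decorator

@doc(storage_options=shared_docs["storage_options"])
def to_stata(path, storage_options=None):
    """Write to Stata format.

    Parameters
    ----------
    {storage_options}
    """

print(to_stata.__doc__)  # placeholder replaced by the shared fragment
```

The payoff is the one in this diff: the `storage_options` text is maintained in a single place and every writer method (`to_stata`, `to_markdown`, `to_parquet`, ...) picks it up through its decorator instead of carrying a drifting copy.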