Skip to content

DOC: update documentation (closes #21749, #42979)

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Aug 17, 2021
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions pandas/_testing/_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,19 +160,18 @@ def network(
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::

>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
>>> import pandas as pd
>>> @pd._testing.network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... with pd.io.common.urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
URLError: <urlopen error unknown url type: rabbit>

You can specify alternative URLs::

>>> @network("https://www.yahoo.com")
>>> @pd._testing.network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Expand All @@ -183,7 +182,7 @@ def network(
If you set check_before_test, it will check the url first and not run the
test on failure::

>>> @network("failing://url.blaher", check_before_test=True)
>>> @pd._testing.network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
Expand Down
20 changes: 10 additions & 10 deletions pandas/_testing/asserters.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,10 +303,10 @@ def assert_index_equal(

Examples
--------
>>> from pandas.testing import assert_index_equal
>>> import pandas as pd
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
>>> pd.testing.assert_index_equal(a, b)
"""
__tracebackhide__ = True

Expand Down Expand Up @@ -794,10 +794,10 @@ def assert_extension_array_equal(

Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> import pandas as pd
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
>>> pd.testing.assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
Expand Down Expand Up @@ -938,10 +938,10 @@ def assert_series_equal(

Examples
--------
>>> from pandas.testing import assert_series_equal
>>> import pandas as pd
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
>>> pd.testing.assert_series_equal(a, b)
"""
__tracebackhide__ = True

Expand Down Expand Up @@ -1203,17 +1203,17 @@ def assert_frame_equal(
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.

>>> from pandas._testing import assert_frame_equal
>>> import pandas as pd
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})

df1 equals itself.

>>> assert_frame_equal(df1, df1)
>>> pd._testing.assert_frame_equal(df1, df1)

df1 differs from df2 as column 'b' is of a different type.

>>> assert_frame_equal(df1, df2)
>>> pd._testing.assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Expand All @@ -1224,7 +1224,7 @@ def assert_frame_equal(

Ignore differing dtypes in columns with check_dtype.

>>> assert_frame_equal(df1, df2, check_dtype=False)
>>> pd._testing.assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True

Expand Down
8 changes: 4 additions & 4 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -1462,20 +1462,20 @@ def take(

Examples
--------
>>> from pandas.api.extensions import take
>>> import pandas as pd

With the default ``allow_fill=False``, negative numbers indicate
positional indices from the right.

>>> take(np.array([10, 20, 30]), [0, 0, -1])
>>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1])
array([10, 10, 30])

Setting ``allow_fill=True`` will place `fill_value` in those positions.

>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
>>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])

>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
>>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
... fill_value=-10)
array([ 10, 10, -10])
"""
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/sparse/array.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,8 +282,8 @@ class SparseArray(OpsMixin, PandasObject, ExtensionArray):

Examples
--------
>>> from pandas.arrays import SparseArray
>>> arr = SparseArray([0, 0, 1, 2])
>>> import pandas as pd
>>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
Expand Down
7 changes: 3 additions & 4 deletions pandas/core/dtypes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,10 +387,9 @@ def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDt

Examples
--------
>>> from pandas.api.extensions import register_extension_dtype
>>> from pandas.api.extensions import ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
>>> import pandas as pd
>>> @pd.api.extensions.register_extension_dtype
... class MyExtensionDtype(pd.api.extensions.ExtensionDtype):
... name = "myextension"
"""
_registry.register(cls)
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/dtypes/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -1450,15 +1450,15 @@ def is_extension_array_dtype(arr_or_dtype) -> bool:

Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> import pandas as pd
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
>>> pd.api.types.is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
>>> pd.api.types.is_extension_array_dtype(arr.dtype)
True

>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
>>> pd.api.types.is_extension_array_dtype(arr.dtype)
False
"""
dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
Expand Down
20 changes: 10 additions & 10 deletions pandas/core/dtypes/concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,42 +191,42 @@ def union_categoricals(

Examples
--------
>>> from pandas.api.types import union_categoricals
>>> import pandas as pd

If you want to combine categoricals that do not necessarily have
the same categories, `union_categoricals` will combine a list-like
the same categories, `pd.api.types.union_categoricals` will combine a list-like
of categoricals. The new categories will be the union of the
categories being combined.

>>> a = pd.Categorical(["b", "c"])
>>> b = pd.Categorical(["a", "b"])
>>> union_categoricals([a, b])
>>> pd.api.types.union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a']

By default, the resulting categories will be ordered as they appear
in the `categories` of the data. If you want the categories to be
lexsorted, use `sort_categories=True` argument.

>>> union_categoricals([a, b], sort_categories=True)
>>> pd.api.types.union_categoricals([a, b], sort_categories=True)
['b', 'c', 'a', 'b']
Categories (3, object): ['a', 'b', 'c']

`union_categoricals` also works with the case of combining two
`pd.api.types.union_categoricals` also works with the case of combining two
categoricals of the same categories and order information (e.g. what
you could also `append` for).

>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "a"], ordered=True)
>>> union_categoricals([a, b])
>>> pd.api.types.union_categoricals([a, b])
['a', 'b', 'a', 'b', 'a']
Categories (2, object): ['a' < 'b']

Raises `TypeError` because the categories are ordered and not identical.

>>> a = pd.Categorical(["a", "b"], ordered=True)
>>> b = pd.Categorical(["a", "b", "c"], ordered=True)
>>> union_categoricals([a, b])
>>> pd.api.types.union_categoricals([a, b])
Traceback (most recent call last):
...
TypeError: to union ordered Categoricals, all categories must be the same
Expand All @@ -238,17 +238,17 @@ def union_categoricals(

>>> a = pd.Categorical(["a", "b", "c"], ordered=True)
>>> b = pd.Categorical(["c", "b", "a"], ordered=True)
>>> union_categoricals([a, b], ignore_order=True)
>>> pd.api.types.union_categoricals([a, b], ignore_order=True)
['a', 'b', 'c', 'c', 'b', 'a']
Categories (3, object): ['a', 'b', 'c']

`union_categoricals` also works with a `CategoricalIndex`, or `Series`
`pd.api.types.union_categoricals` also works with a `CategoricalIndex`, or `Series`
containing categorical data, but note that the resulting array will
always be a plain `Categorical`

>>> a = pd.Series(["b", "c"], dtype='category')
>>> b = pd.Series(["a", "b"], dtype='category')
>>> union_categoricals([a, b])
>>> pd.api.types.union_categoricals([a, b])
['b', 'c', 'a', 'b']
Categories (3, object): ['b', 'c', 'a']
"""
Expand Down
12 changes: 6 additions & 6 deletions pandas/core/dtypes/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,20 +51,20 @@ def is_number(obj) -> bool:

Examples
--------
>>> from pandas.api.types import is_number
>>> is_number(1)
>>> import pandas as pd
>>> pd.api.types.is_number(1)
True
>>> is_number(7.15)
>>> pd.api.types.is_number(7.15)
True

Booleans are valid because they are int subclass.

>>> is_number(False)
>>> pd.api.types.is_number(False)
True

>>> is_number("foo")
>>> pd.api.types.is_number("foo")
False
>>> is_number("5")
>>> pd.api.types.is_number("5")
False
"""
return isinstance(obj, (Number, np.number))
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -5681,8 +5681,8 @@ def astype(

Convert to ordered categorical type with custom ordering:

>>> from pandas.api.types import CategoricalDtype
>>> cat_dtype = CategoricalDtype(
>>> import pandas as pd
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
Expand Down Expand Up @@ -8042,10 +8042,10 @@ def resample(

To replace the use of the deprecated `loffset` argument:

>>> from pandas.tseries.frequencies import to_offset
>>> import pandas as pd
>>> loffset = '19min'
>>> ts_out = ts.resample('17min').sum()
>>> ts_out.index = ts_out.index + to_offset(loffset)
>>> ts_out.index = ts_out.index + pd.tseries.frequencies.to_offset(loffset)
>>> ts_out
2000-10-01 23:33:00 0
2000-10-01 23:50:00 9
Expand Down
12 changes: 6 additions & 6 deletions pandas/io/formats/latex.py
Original file line number Diff line number Diff line change
Expand Up @@ -488,9 +488,9 @@ def _select_iterator(self, over: str) -> type[RowStringIterator]:
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.

>>> from pandas import DataFrame
>>> import pandas as pd
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
Expand Down Expand Up @@ -578,9 +578,9 @@ def env_end(self) -> str:
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.

>>> from pandas import DataFrame
>>> import pandas as pd
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
Expand Down Expand Up @@ -625,9 +625,9 @@ def env_end(self) -> str:
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.

>>> from pandas import DataFrame
>>> import pandas as pd
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
Expand Down
18 changes: 10 additions & 8 deletions pandas/io/stata.py
Original file line number Diff line number Diff line change
Expand Up @@ -3073,20 +3073,21 @@ class StataWriter117(StataWriter):

Examples
--------
>>> from pandas.io.stata import StataWriter117
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])
>>> writer = StataWriter117('./data_file.dta', data)
>>> writer = pd.io.stata.StataWriter117('./data_file.dta', data)
>>> writer.write_file()

Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriter117('./data_file.zip', data, compression=compression)
>>> writer = pd.io.stata.StataWriter117('./data_file.zip',
... data, compression=compression)
>>> writer.write_file()

Or with long strings stored in strl format
>>> data = pd.DataFrame([['A relatively long string'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriter117('./data_file_with_long_strings.dta', data,
>>> writer = pd.io.stata.StataWriter117('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
Expand Down Expand Up @@ -3465,21 +3466,22 @@ class StataWriterUTF8(StataWriter117):
--------
Using Unicode data and column names

>>> from pandas.io.stata import StataWriterUTF8
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ'])
>>> writer = StataWriterUTF8('./data_file.dta', data)
>>> writer = pd.io.stata.StataWriterUTF8('./data_file.dta', data)
>>> writer.write_file()

Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriterUTF8('./data_file.zip', data, compression=compression)
>>> writer = pd.io.stata.StataWriterUTF8('./data_file.zip',
... data, compression=compression)
>>> writer.write_file()

Or with long strings stored in strl format

>>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data,
>>> writer = pd.io.stata.StataWriterUTF8('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
Expand Down
Loading