Skip to content

Sync Fork from Upstream Repo #30

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 16 commits into from
Jan 31, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion doc/source/development/contributing.rst
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ check each issue individually, and it's not possible to find the unassigned ones

For this reason, we implemented a workaround consisting of adding a comment with the exact
text `take`. When you do it, a GitHub action will automatically assign you the issue
(this will take seconds, and may require refreshint the page to see it).
(this will take seconds, and may require refreshing the page to see it).
By doing this, it's possible to filter the list of issues and find only the unassigned ones.

So, a good way to find an issue to start contributing to pandas is to check the list of
Expand Down
4 changes: 2 additions & 2 deletions doc/source/whatsnew/v1.1.0.rst
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ Other API changes

- :meth:`Series.describe` will now show distribution percentiles for ``datetime`` dtypes, statistics ``first`` and ``last``
will now be ``min`` and ``max`` to match with numeric dtypes in :meth:`DataFrame.describe` (:issue:`30164`)
-
- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`)

Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expand Down Expand Up @@ -148,7 +148,7 @@ Indexing
^^^^^^^^
- Bug in slicing on a :class:`DatetimeIndex` with a partial-timestamp dropping high-resolution indices near the end of a year, quarter, or month (:issue:`31064`)
- Bug in :meth:`PeriodIndex.get_loc` treating higher-resolution strings differently from :meth:`PeriodIndex.get_value` (:issue:`31172`)
-
- Bug in :meth:`Series.at` and :meth:`DataFrame.at` not matching ``.loc`` behavior when looking up an integer in a :class:`Float64Index` (:issue:`31329`)

Missing
^^^^^^^
Expand Down
1 change: 1 addition & 0 deletions pandas/_typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
# other

Dtype = Union[str, np.dtype, "ExtensionDtype"]
DtypeObj = Union[np.dtype, "ExtensionDtype"]
FilePathOrBuffer = Union[str, Path, IO[AnyStr]]

# FrameOrSeriesUnion means either a DataFrame or a Series. E.g.
Expand Down
70 changes: 34 additions & 36 deletions pandas/core/base.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Base and utility classes for pandas objects.
"""

import builtins
import textwrap
from typing import Dict, FrozenSet, List, Optional, Union
Expand Down Expand Up @@ -45,11 +46,15 @@


class PandasObject(DirNamesMixin):
"""baseclass for various pandas objects"""
"""
Baseclass for various pandas objects.
"""

@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
"""
Class constructor (for this class it's just `__class__`.
"""
return type(self)

def __repr__(self) -> str:
Expand Down Expand Up @@ -77,16 +82,14 @@ def __sizeof__(self):
"""
if hasattr(self, "memory_usage"):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
return int(mem if is_scalar(mem) else mem.sum())

# no memory_usage attribute, so fall back to
# object's 'sizeof'
# no memory_usage attribute, so fall back to object's 'sizeof'
return super().__sizeof__()

def _ensure_type(self: T, obj) -> T:
"""Ensure that an object has same type as self.
"""
Ensure that an object has same type as self.

Used by type checkers.
"""
Expand All @@ -95,7 +98,8 @@ def _ensure_type(self: T, obj) -> T:


class NoNewAttributesMixin:
"""Mixin which prevents adding new attributes.
"""
Mixin which prevents adding new attributes.

Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
Expand All @@ -106,7 +110,9 @@ class NoNewAttributesMixin:
"""

def _freeze(self):
"""Prevents setting additional attributes"""
"""
Prevents setting additional attributes.
"""
object.__setattr__(self, "__frozen", True)

# prevent adding any attribute via s.xxx.new_attribute = ...
Expand Down Expand Up @@ -180,14 +186,12 @@ class SelectionMixin:
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
Return a name for myself;

This would ideally be called the 'name' property,
but we cannot conflict with the Series.name property which can be set.
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
return self._selection

@property
def _selection_list(self):
Expand All @@ -199,7 +203,6 @@ def _selection_list(self):

@cache_readonly
def _selected_obj(self):

if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
Expand Down Expand Up @@ -246,12 +249,11 @@ def _gotitem(self, key, ndim: int, subset=None):

Parameters
----------
key : string / list of selections
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on

"""
raise AbstractMethodError(self)

Expand All @@ -266,7 +268,6 @@ def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise

"""
assert isinstance(arg, str)

Expand Down Expand Up @@ -585,7 +586,6 @@ def _shallow_copy(self, obj, **kwargs):
"""
return a new object with the replacement attributes
"""

if isinstance(obj, self._constructor):
obj = obj.obj
for attr in self._attributes:
Expand Down Expand Up @@ -669,8 +669,7 @@ def item(self):

if len(self) == 1:
return next(iter(self))
else:
raise ValueError("can only convert an array of size 1 to a Python scalar")
raise ValueError("can only convert an array of size 1 to a Python scalar")

@property
def nbytes(self) -> int:
Expand Down Expand Up @@ -735,7 +734,6 @@ def array(self) -> ExtensionArray:

Examples
--------

For regular NumPy types like int, and float, a PandasArray
is returned.

Expand Down Expand Up @@ -851,12 +849,11 @@ def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
"""
if is_extension_array_dtype(self.dtype):
return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs)
else:
if kwargs:
msg = "to_numpy() got an unexpected keyword argument '{}'".format(
list(kwargs.keys())[0]
)
raise TypeError(msg)
elif kwargs:
bad_keys = list(kwargs.keys())[0]
raise TypeError(
f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
)

result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
Expand Down Expand Up @@ -1076,7 +1073,9 @@ def _reduce(
filter_type=None,
**kwds,
):
""" perform the reduction type operation if we can """
"""
Perform the reduction type operation if we can.
"""
func = getattr(self, name, None)
if func is None:
raise TypeError(
Expand All @@ -1103,9 +1102,7 @@ def _map_values(self, mapper, na_action=None):
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.

"""

# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
Expand Down Expand Up @@ -1341,7 +1338,9 @@ def is_monotonic(self) -> bool:

@property
def is_monotonic_increasing(self) -> bool:
"""alias for is_monotonic"""
"""
Alias for is_monotonic.
"""
# mypy complains if we alias directly
return self.is_monotonic

Expand Down Expand Up @@ -1455,7 +1454,6 @@ def factorize(self, sort=False, na_sentinel=-1):

Examples
--------

>>> x = pd.Series([1, 2, 3])
>>> x
0 1
Expand Down
10 changes: 0 additions & 10 deletions pandas/core/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,16 +72,6 @@ def consensus_name_attr(objs):
return name


def maybe_box(indexer, values, obj, key):
    """
    Return a boxed scalar for ``key``, resolving arrays through ``obj``.

    When ``values`` is a plain ndarray (multiple results came back),
    the key is resolved via ``indexer.get_loc`` and the element is
    pulled from ``obj`` so it comes back boxed; any other ``values``
    is assumed to already be a single value and is returned as-is.
    """
    if not isinstance(values, np.ndarray):
        # already a single (boxed) value — nothing to do
        return values
    # multiples came back: box by indexing into the original object
    loc = indexer.get_loc(key)
    return obj[loc]


def maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed

Expand Down
Loading