Skip to content

CLN: sqlalchemy deprecation, annotations, share shape properties #40491

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Mar 20, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions pandas/_libs/intervaltree.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,8 @@ NODE_CLASSES = {}
{{for dtype, dtype_title, closed, closed_title, cmp_left, cmp_right,
cmp_left_converse, cmp_right_converse, fused_prefix in nodes}}


@cython.internal
cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode:
"""Non-terminal node for an IntervalTree

Expand Down
5 changes: 3 additions & 2 deletions pandas/_libs/lib.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -1120,6 +1120,7 @@ except AttributeError:
pass


@cython.internal
cdef class Seen:
"""
Class for keeping track of the types of elements
Expand Down Expand Up @@ -2580,7 +2581,7 @@ def tuples_to_object_array(ndarray[object] tuples):
return result


def to_object_array_tuples(rows: object):
def to_object_array_tuples(rows: object) -> np.ndarray:
"""
Convert a list of tuples into an object array. Any subclass of
tuple in `rows` will be cast to tuple.
Expand All @@ -2592,7 +2593,7 @@ def to_object_array_tuples(rows: object):

Returns
-------
numpy array of the object dtype.
np.ndarray[object, ndim=2]
"""
cdef:
Py_ssize_t i, j, n, k, tmp
Expand Down
8 changes: 4 additions & 4 deletions pandas/_libs/ops.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ from pandas._libs.util cimport (

@cython.wraparound(False)
@cython.boundscheck(False)
def scalar_compare(object[:] values, object val, object op):
def scalar_compare(object[:] values, object val, object op) -> ndarray:
"""
Compare each element of `values` array with the scalar `val`, with
the comparison operation described by `op`.
Expand Down Expand Up @@ -114,7 +114,7 @@ def scalar_compare(object[:] values, object val, object op):

@cython.wraparound(False)
@cython.boundscheck(False)
def vec_compare(ndarray[object] left, ndarray[object] right, object op):
def vec_compare(ndarray[object] left, ndarray[object] right, object op) -> ndarray:
"""
Compare the elements of `left` with the elements of `right` pointwise,
with the comparison operation described by `op`.
Expand Down Expand Up @@ -180,7 +180,7 @@ def vec_compare(ndarray[object] left, ndarray[object] right, object op):

@cython.wraparound(False)
@cython.boundscheck(False)
def scalar_binop(object[:] values, object val, object op):
def scalar_binop(object[:] values, object val, object op) -> ndarray:
"""
Apply the given binary operator `op` between each element of the array
`values` and the scalar `val`.
Expand Down Expand Up @@ -217,7 +217,7 @@ def scalar_binop(object[:] values, object val, object op):

@cython.wraparound(False)
@cython.boundscheck(False)
def vec_binop(object[:] left, object[:] right, object op):
def vec_binop(object[:] left, object[:] right, object op) -> ndarray:
"""
Apply the given binary operator `op` pointwise to the elements of
arrays `left` and `right`.
Expand Down
7 changes: 5 additions & 2 deletions pandas/_libs/reshape.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ ctypedef fused reshape_t:
@cython.boundscheck(False)
def unstack(reshape_t[:, :] values, const uint8_t[:] mask,
Py_ssize_t stride, Py_ssize_t length, Py_ssize_t width,
reshape_t[:, :] new_values, uint8_t[:, :] new_mask):
reshape_t[:, :] new_values, uint8_t[:, :] new_mask) -> None:
"""
Transform long values to wide new_values.

Expand Down Expand Up @@ -111,7 +111,10 @@ def explode(ndarray[object] values):

Returns
-------
tuple(values, counts)
ndarray[object]
result
ndarray[int64_t]
counts
"""
cdef:
Py_ssize_t i, j, count, n
Expand Down
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/nattype.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,7 @@ cdef class _NaT(datetime):
# This allows Timestamp(ts.isoformat()) to always correctly roundtrip.
return "NaT"

def __hash__(self):
def __hash__(self) -> int:
return NPY_NAT

@property
Expand Down
8 changes: 4 additions & 4 deletions pandas/_libs/tslibs/vectorized.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def ints_to_pydatetime(
object freq=None,
bint fold=False,
str box="datetime"
):
) -> np.ndarray:
"""
Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp.

Expand All @@ -116,7 +116,7 @@ def ints_to_pydatetime(

Returns
-------
ndarray of dtype specified by box
ndarray[object] of type specified by box
"""
cdef:
Py_ssize_t i, n = len(arr)
Expand Down Expand Up @@ -223,7 +223,7 @@ cdef inline int _reso_stamp(npy_datetimestruct *dts):
return RESO_DAY


def get_resolution(const int64_t[:] stamps, tzinfo tz=None):
def get_resolution(const int64_t[:] stamps, tzinfo tz=None) -> Resolution:
cdef:
Py_ssize_t i, n = len(stamps)
npy_datetimestruct dts
Expand Down Expand Up @@ -332,7 +332,7 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t

@cython.wraparound(False)
@cython.boundscheck(False)
def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None):
def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None) -> bool:
"""
Check if all of the given (nanosecond) timestamps are normalized to
midnight, i.e. hour == minute == second == 0. If the optional timezone
Expand Down
2 changes: 1 addition & 1 deletion pandas/_libs/writers.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ def write_csv_rows(

@cython.boundscheck(False)
@cython.wraparound(False)
def convert_json_to_lines(arr: object) -> str:
def convert_json_to_lines(arr: str) -> str:
"""
replace comma separated json with line feeds, paying special attention
to quotes & brackets
Expand Down
10 changes: 7 additions & 3 deletions pandas/core/arrays/_mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,16 @@
Type,
TypeVar,
Union,
cast,
)

import numpy as np

from pandas._libs import lib
from pandas._typing import Shape
from pandas._typing import (
F,
Shape,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Expand Down Expand Up @@ -41,7 +45,7 @@
)


def ravel_compat(meth):
def ravel_compat(meth: F) -> F:
"""
Decorator to ravel a 2D array before passing it to a cython operation,
then reshape the result to our own shape.
Expand All @@ -58,7 +62,7 @@ def method(self, *args, **kwargs):
order = "F" if flags.f_contiguous else "C"
return result.reshape(self.shape, order=order)

return method
return cast(F, method)


class NDArrayBackedExtensionArray(ExtensionArray):
Expand Down
7 changes: 3 additions & 4 deletions pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -6130,15 +6130,14 @@ def _maybe_disable_logical_methods(self, opname: str_t):
# This call will raise
make_invalid_op(opname)(self)

@final
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
# not using "(len(self), )" to return "correct" shape if the values
# consists of a >1 D array (see GH-27775)
# overridden in MultiIndex.shape to avoid materializing the values
return self._values.shape
# See GH#27775, GH#27384 for history/reasoning in how this is defined.
return (len(self),)


def ensure_index_from_sequences(sequences, names=None):
Expand Down
9 changes: 0 additions & 9 deletions pandas/core/indexes/multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -752,15 +752,6 @@ def dtypes(self) -> Series:
}
)

@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
# overriding the base Index.shape definition to avoid materializing
# the values (GH-27384, GH-27775)
return (len(self),)

def __len__(self) -> int:
return len(self.codes[0])

Expand Down
6 changes: 3 additions & 3 deletions pandas/core/indexes/range.py
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,7 @@ def take(
**kwargs,
)

def tolist(self):
def tolist(self) -> list[int]:
return list(self._range)

@doc(Int64Index.__iter__)
Expand Down Expand Up @@ -494,13 +494,13 @@ def _minmax(self, meth: str):

return self.start + self.step * no_steps

def min(self, axis=None, skipna=True, *args, **kwargs) -> int:
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")

def max(self, axis=None, skipna=True, *args, **kwargs) -> int:
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
Expand Down
9 changes: 5 additions & 4 deletions pandas/core/util/hashing.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def combine_hash_arrays(arrays: Iterator[np.ndarray], num_items: int) -> np.ndar

Returns
-------
np.ndarray[int64]
np.ndarray[uint64]

Should be the same as CPython's tupleobject.c
"""
Expand Down Expand Up @@ -184,7 +184,7 @@ def hash_tuples(

Returns
-------
ndarray of hashed values array
ndarray[np.uint64] of hashed values
"""
if not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
Expand Down Expand Up @@ -227,7 +227,7 @@ def _hash_categorical(cat: Categorical, encoding: str, hash_key: str) -> np.ndar

Returns
-------
ndarray of hashed values array, same size as len(c)
ndarray[np.uint64] of hashed values, same size as len(c)
"""
# Convert ExtensionArrays to ndarrays
values = np.asarray(cat.categories._values)
Expand Down Expand Up @@ -274,7 +274,8 @@ def hash_array(

Returns
-------
1d uint64 numpy array of hash values, same length as the vals
ndarray[np.uint64, ndim=1]
Hashed values, same length as the vals.
"""
if not hasattr(vals, "dtype"):
raise TypeError("must pass a ndarray-like")
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/json/_normalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
from pandas import DataFrame


def convert_to_line_delimits(s):
def convert_to_line_delimits(s: str) -> str:
"""
Helper function that converts JSON lists to line delimited JSON.
"""
Expand Down
10 changes: 8 additions & 2 deletions pandas/tests/io/test_sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -1991,8 +1991,14 @@ def bar(connection, data):
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
if _gt14():
# https://github.com/sqlalchemy/sqlalchemy/commit/
# 00b5c10846e800304caa86549ab9da373b42fa5d#r48323973
foo_data = foo(conn)
bar(conn, foo_data)
else:
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)

DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
Expand Down