CLN, STYLE: remove unused variables and imports in Cython files #48290

Merged
merged 5 commits into from
Sep 7, 2022
4 changes: 4 additions & 0 deletions .pre-commit-config.yaml
@@ -26,6 +26,10 @@ repos:
hooks:
- id: codespell
types_or: [python, rst, markdown]
- repo: https://github.com/MarcoGorelli/cython-lint
Member: just glancing at this, im stoked at the concept!

Member: does this have/need eyeballs?

Member Author: Would certainly help if you're interested, thanks!

rev: v0.1.2
hooks:
- id: cython-lint
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
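For context on the config change above: the new hook runs cython-lint over the repository's .pyx/.pxi sources, and the file-by-file changes below are the cleanup that linting motivated. A minimal, hypothetical sketch (not taken from pandas) of the two kinds of findings this PR addresses, an unused cimport and an unused local, with the cleaned-up version underneath:

    # before: uint32_t is cimported but never used, and tmp is declared but never read
    from numpy cimport (
        int64_t,
        uint32_t,
    )

    def add_ints(int64_t a, int64_t b):
        cdef int64_t tmp
        return a + b

    # after: only the names that are actually used remain
    from numpy cimport int64_t

    def add_ints(int64_t a, int64_t b):
        return a + b

With the hook in the config, something like pre-commit run cython-lint --all-files should reproduce the check locally, assuming pre-commit is installed.
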
28 changes: 14 additions & 14 deletions pandas/_libs/algos_common_helper.pxi.in
@@ -35,20 +35,20 @@ def ensure_object(object arr):
{{py:

# name, c_type, dtype
dtypes = [('float64', 'FLOAT64', 'float64'),
# ('float32', 'FLOAT32', 'float32'), # disabling bc unused
('int8', 'INT8', 'int8'),
('int16', 'INT16', 'int16'),
('int32', 'INT32', 'int32'),
('int64', 'INT64', 'int64'),
('uint64', 'UINT64', 'uint64'),
dtypes = [('float64', 'NPY_FLOAT64', 'float64'),
# ('float32', 'NPY_FLOAT32', 'float32'), # disabling bc unused
('int8', 'NPY_INT8', 'int8'),
('int16', 'NPY_INT16', 'int16'),
('int32', 'NPY_INT32', 'int32'),
('int64', 'NPY_INT64', 'int64'),
('uint64', 'NPY_UINT64', 'uint64'),
# Disabling uint and complex dtypes because we do not use them
# (and compiling them increases wheel size) (except uint64)
# ('uint8', 'UINT8', 'uint8'),
# ('uint16', 'UINT16', 'uint16'),
# ('uint32', 'UINT32', 'uint32'),
# ('complex64', 'COMPLEX64', 'complex64'),
# ('complex128', 'COMPLEX128', 'complex128')
# ('uint8', 'NPY_UINT8', 'uint8'),
# ('uint16', 'NPY_UINT16', 'uint16'),
# ('uint32', 'NPY_UINT32', 'uint32'),
# ('complex64', 'NPY_COMPLEX64', 'complex64'),
# ('complex128', 'NPY_COMPLEX128', 'complex128')
]

def get_dispatch(dtypes):
@@ -62,11 +62,11 @@ def get_dispatch(dtypes):

def ensure_{{name}}(object arr, copy=True):
if util.is_array(arr):
if (<ndarray>arr).descr.type_num == NPY_{{c_type}}:
if (<ndarray>arr).descr.type_num == {{c_type}}:
return arr
else:
# equiv: arr.astype(np.{{dtype}}, copy=copy)
return cnp.PyArray_Cast(<ndarray>arr, cnp.NPY_{{c_type}})
return cnp.PyArray_Cast(<ndarray>arr, cnp.{{c_type}})
else:
return np.array(arr, dtype=np.{{dtype}})

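A note on the .pxi.in template change above: these files are expanded at build time, the {{py: ...}} block defines the dtype table and the {{for ...}} loop stamps out one ensure_* function per row. Moving the NPY_ prefix into the table means the template substitutes the complete constant name instead of building it by concatenation as NPY_{{c_type}}; the generated code presumably stays the same. As a sketch, the float64 row of the new table should expand to roughly this (util, cnp, np and ndarray come from the surrounding module):

    def ensure_float64(object arr, copy=True):
        if util.is_array(arr):
            if (<ndarray>arr).descr.type_num == NPY_FLOAT64:
                return arr
            else:
                # equiv: arr.astype(np.float64, copy=copy)
                return cnp.PyArray_Cast(<ndarray>arr, cnp.NPY_FLOAT64)
        else:
            return np.array(arr, dtype=np.float64)
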
5 changes: 0 additions & 5 deletions pandas/_libs/groupby.pyx
@@ -17,14 +17,10 @@ from numpy cimport (
float32_t,
float64_t,
int8_t,
int16_t,
int32_t,
int64_t,
intp_t,
ndarray,
uint8_t,
uint16_t,
uint32_t,
uint64_t,
)
from numpy.math cimport NAN
@@ -38,7 +34,6 @@ from pandas._libs.algos cimport (
)

from pandas._libs.algos import (
ensure_platform_int,
groupsort_indexer,
rank_1d,
take_2d_axis1_float64_float64,
2 changes: 0 additions & 2 deletions pandas/_libs/hashing.pyx
@@ -13,7 +13,6 @@ from numpy cimport (
import_array,
ndarray,
uint8_t,
uint32_t,
uint64_t,
)

@@ -158,7 +157,6 @@ cdef uint64_t low_level_siphash(uint8_t* data, size_t datalen,
cdef int i
cdef uint8_t* end = data + datalen - (datalen % sizeof(uint64_t))
cdef int left = datalen & 7
cdef int left_byte
cdef int cROUNDS = 2
cdef int dROUNDS = 4

11 changes: 1 addition & 10 deletions pandas/_libs/hashtable.pyx
@@ -15,13 +15,7 @@ from libc.stdlib cimport (
import numpy as np

cimport numpy as cnp
from numpy cimport (
float64_t,
ndarray,
uint8_t,
uint32_t,
)
from numpy.math cimport NAN
from numpy cimport ndarray

cnp.import_array()

@@ -37,9 +31,6 @@ from pandas._libs.khash cimport (
kh_needed_n_buckets,
kh_python_hash_equal,
kh_python_hash_func,
kh_str_t,
khcomplex64_t,
khcomplex128_t,
khiter_t,
)
from pandas._libs.missing cimport checknull
58 changes: 29 additions & 29 deletions pandas/_libs/hashtable_class_helper.pxi.in
@@ -26,21 +26,21 @@ cdef kh{{name}}_t to_kh{{name}}_t({{name}}_t val) nogil:


# name
c_types = ['khcomplex128_t',
'khcomplex64_t',
'float64_t',
'float32_t',
'int64_t',
'int32_t',
'int16_t',
'int8_t',
'uint64_t',
'uint32_t',
'uint16_t',
'uint8_t']
c_types = [('khcomplex128_t', 'are_equivalent_khcomplex128_t'),
('khcomplex64_t', 'are_equivalent_khcomplex64_t'),
('float64_t', 'are_equivalent_float64_t'),
('float32_t', 'are_equivalent_float32_t'),
('int64_t', 'are_equivalent_int64_t'),
('int32_t', 'are_equivalent_int32_t'),
('int16_t', 'are_equivalent_int16_t'),
('int8_t', 'are_equivalent_int8_t'),
('uint64_t', 'are_equivalent_uint64_t'),
('uint32_t', 'are_equivalent_uint32_t'),
('uint16_t', 'are_equivalent_uint16_t'),
('uint8_t', 'are_equivalent_uint8_t')]
}}

{{for c_type in c_types}}
{{for c_type, are_equivalent_c_type in c_types}}

cdef bint is_nan_{{c_type}}({{c_type}} val) nogil:
{{if c_type in {'khcomplex128_t', 'khcomplex64_t'} }}
@@ -55,7 +55,7 @@ cdef bint is_nan_{{c_type}}({{c_type}} val) nogil:
{{if c_type in {'khcomplex128_t', 'khcomplex64_t', 'float64_t', 'float32_t'} }}
# are_equivalent_{{c_type}} is cimported via khash.pxd
{{else}}
cdef bint are_equivalent_{{c_type}}({{c_type}} val1, {{c_type}} val2) nogil:
cdef bint {{are_equivalent_c_type}}({{c_type}} val1, {{c_type}} val2) nogil:
return val1 == val2
{{endif}}

@@ -375,24 +375,24 @@ cdef class HashTable:

{{py:

# name, dtype, c_type, to_c_type
dtypes = [('Complex128', 'complex128', 'khcomplex128_t', 'to_khcomplex128_t'),
('Float64', 'float64', 'float64_t', ''),
('UInt64', 'uint64', 'uint64_t', ''),
('Int64', 'int64', 'int64_t', ''),
('Complex64', 'complex64', 'khcomplex64_t', 'to_khcomplex64_t'),
('Float32', 'float32', 'float32_t', ''),
('UInt32', 'uint32', 'uint32_t', ''),
('Int32', 'int32', 'int32_t', ''),
('UInt16', 'uint16', 'uint16_t', ''),
('Int16', 'int16', 'int16_t', ''),
('UInt8', 'uint8', 'uint8_t', ''),
('Int8', 'int8', 'int8_t', '')]
# name, dtype, c_type, are_equivalent_c_type, to_c_type
dtypes = [('Complex128', 'complex128', 'khcomplex128_t', 'are_equivalent_khcomplex128_t', 'to_khcomplex128_t'),
('Float64', 'float64', 'float64_t', 'are_equivalent_float64_t', ''),
('UInt64', 'uint64', 'uint64_t', 'are_equivalent_uint64_t', ''),
('Int64', 'int64', 'int64_t', 'are_equivalent_int64_t', ''),
('Complex64', 'complex64', 'khcomplex64_t', 'are_equivalent_khcomplex64_t', 'to_khcomplex64_t'),
('Float32', 'float32', 'float32_t', 'are_equivalent_float32_t', ''),
('UInt32', 'uint32', 'uint32_t', 'are_equivalent_uint32_t', ''),
('Int32', 'int32', 'int32_t', 'are_equivalent_int32_t', ''),
('UInt16', 'uint16', 'uint16_t', 'are_equivalent_uint16_t', ''),
('Int16', 'int16', 'int16_t', 'are_equivalent_int16_t', ''),
('UInt8', 'uint8', 'uint8_t', 'are_equivalent_uint8_t', ''),
('Int8', 'int8', 'int8_t', 'are_equivalent_int8_t', '')]

}}


{{for name, dtype, c_type, to_c_type in dtypes}}
{{for name, dtype, c_type, are_equivalent_c_type, to_c_type in dtypes}}

cdef class {{name}}HashTable(HashTable):

@@ -613,7 +613,7 @@ cdef class {{name}}HashTable(HashTable):
continue
elif ignore_na and (
is_nan_{{c_type}}(val) or
(use_na_value and are_equivalent_{{c_type}}(val, na_value2))
(use_na_value and {{are_equivalent_c_type}}(val, na_value2))
):
# if missing values do not count as unique values (i.e. if
# ignore_na is True), skip the hashtable entry for them,
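The same templating idea applies here: each row now carries its are_equivalent_* helper name, so the template substitutes a complete identifier ({{are_equivalent_c_type}}) rather than concatenating are_equivalent_{{c_type}}. As a hedged sketch, the int64_t row should expand to roughly the helper below, which the hashtable loop shown above then uses for the na_value comparison:

    cdef bint are_equivalent_int64_t(int64_t val1, int64_t val2) nogil:
        return val1 == val2

    # ...and the corresponding check inside Int64HashTable becomes:
    #     elif ignore_na and (
    #         is_nan_int64_t(val) or
    #         (use_na_value and are_equivalent_int64_t(val, na_value2))
    #     ):
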
8 changes: 0 additions & 8 deletions pandas/_libs/index.pyx
@@ -4,17 +4,10 @@ import numpy as np

cimport numpy as cnp
from numpy cimport (
float32_t,
float64_t,
int8_t,
int16_t,
int32_t,
int64_t,
intp_t,
ndarray,
uint8_t,
uint16_t,
uint32_t,
uint64_t,
)

@@ -35,7 +28,6 @@ from pandas._libs import (

from pandas._libs.lib cimport eq_NA_compat
from pandas._libs.missing cimport (
C_NA as NA,
checknull,
is_matching_na,
)
2 changes: 1 addition & 1 deletion pandas/_libs/internals.pyx
@@ -226,7 +226,7 @@ cdef class BlockPlacement:
"""
cdef:
slice nv, s = self._ensure_has_slice()
Py_ssize_t other_int, start, stop, step
Py_ssize_t start, stop, step
ndarray[intp_t, ndim=1] newarr

if s is not None:
12 changes: 1 addition & 11 deletions pandas/_libs/interval.pyx
@@ -13,15 +13,7 @@ from cpython.datetime cimport (
import_datetime()

cimport cython
from cpython.object cimport (
Py_EQ,
Py_GE,
Py_GT,
Py_LE,
Py_LT,
Py_NE,
PyObject_RichCompare,
)
from cpython.object cimport PyObject_RichCompare
from cython cimport Py_ssize_t

import numpy as np
@@ -31,9 +23,7 @@ from numpy cimport (
NPY_QUICKSORT,
PyArray_ArgSort,
PyArray_Take,
float32_t,
float64_t,
int32_t,
int64_t,
ndarray,
uint64_t,
1 change: 0 additions & 1 deletion pandas/_libs/lib.pyx
@@ -47,7 +47,6 @@ from numpy cimport (
PyArray_IterNew,
complex128_t,
flatiter,
float32_t,
float64_t,
int64_t,
intp_t,
12 changes: 5 additions & 7 deletions pandas/_libs/sparse.pyx
@@ -3,10 +3,8 @@ import numpy as np

cimport numpy as cnp
from numpy cimport (
float32_t,
float64_t,
int8_t,
int16_t,
int32_t,
int64_t,
ndarray,
@@ -127,7 +125,7 @@ cdef class IntIndex(SparseIndex):

cpdef IntIndex intersect(self, SparseIndex y_):
cdef:
Py_ssize_t out_length, xi, yi = 0, result_indexer = 0
Py_ssize_t xi, yi = 0, result_indexer = 0
int32_t xind
ndarray[int32_t, ndim=1] xindices, yindices, new_indices
IntIndex y
@@ -205,7 +203,7 @@ cdef class IntIndex(SparseIndex):
Vectorized lookup, returns ndarray[int32_t]
"""
cdef:
Py_ssize_t n, i, ind_val
Py_ssize_t n
ndarray[int32_t, ndim=1] inds
ndarray[uint8_t, ndim=1, cast=True] mask
ndarray[int32_t, ndim=1] masked
@@ -232,7 +230,7 @@ cpdef get_blocks(ndarray[int32_t, ndim=1] indices):

cpdef get_blocks(ndarray[int32_t, ndim=1] indices):
cdef:
Py_ssize_t init_len, i, npoints, result_indexer = 0
Py_ssize_t i, npoints, result_indexer = 0
int32_t block, length = 1, cur, prev
ndarray[int32_t, ndim=1] locs, lens

@@ -606,7 +604,7 @@ cdef class BlockUnion(BlockMerge):
cdef:
ndarray[int32_t, ndim=1] xstart, xend, ystart
ndarray[int32_t, ndim=1] yend, out_bloc, out_blen
int32_t nstart, nend, diff
int32_t nstart, nend
Py_ssize_t max_len, result_indexer = 0

xstart = self.xstart
@@ -659,7 +657,7 @@ cdef class BlockUnion(BlockMerge):
"""
cdef:
ndarray[int32_t, ndim=1] xstart, xend, ystart, yend
int32_t xi, yi, xnblocks, ynblocks, nend
int32_t xi, yi, ynblocks, nend

if mode != 0 and mode != 1:
raise Exception('Mode must be 0 or 1')
3 changes: 1 addition & 2 deletions pandas/_libs/tslib.pyx
@@ -474,7 +474,6 @@ cpdef array_to_datetime(
Py_ssize_t i, n = len(values)
object val, tz
ndarray[int64_t] iresult
ndarray[object] oresult
npy_datetimestruct dts
NPY_DATETIMEUNIT out_bestunit
bint utc_convert = bool(utc)
@@ -489,7 +488,7 @@ cpdef array_to_datetime(
_TSObject _ts
int64_t value
int out_local = 0, out_tzoffset = 0
float offset_seconds, tz_offset
float tz_offset
set out_tzoffset_vals = set()
bint string_to_dts_failed
datetime py_dt