Skip to content

STY: standardize spacing for casting, with linting #23474

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit on Nov 4, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions ci/code_checks.sh
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,14 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
flake8 pandas/_libs --filename=*.pxi.in,*.pxd --select=E501,E302,E203,E111,E114,E221,E303,E231,E126,F403
RET=$(($RET + $?)) ; echo $MSG "DONE"

# Check that cython casting is of the form `<type>obj` as opposed to `<type> obj`;
# it doesn't make a difference, but we want to be internally consistent.
# Note: this grep pattern is (intended to be) equivalent to the python
# regex r'(?<![ ->])> '
MSG='Linting .pyx code for spacing conventions in casting' ; echo $MSG
! grep -r -E --include '*.pyx' --include '*.pxi.in' '> ' pandas/_libs | grep -v '[ ->]> '
RET=$(($RET + $?)) ; echo $MSG "DONE"

# readability/casting: Warnings about C casting instead of C++ casting
# runtime/int: Warnings about using C number types instead of C++ ones
# build/include_subdir: Warnings about prefacing included header files with directory
Expand Down
10 changes: 5 additions & 5 deletions pandas/_libs/algos.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ import missing

cdef float64_t FP_ERR = 1e-13

cdef double NaN = <double> np.NaN
cdef double NaN = <double>np.NaN
cdef double nan = NaN

cdef int64_t iNaT = get_nat()
Expand Down Expand Up @@ -242,7 +242,7 @@ def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None):
int64_t nobs = 0
float64_t vx, vy, sumx, sumy, sumxx, sumyy, meanx, meany, divisor

N, K = (<object> mat).shape
N, K = (<object>mat).shape

if minp is None:
minpv = 1
Expand Down Expand Up @@ -307,7 +307,7 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
int64_t nobs = 0
float64_t vx, vy, sumx, sumxx, sumyy, mean, divisor

N, K = (<object> mat).shape
N, K = (<object>mat).shape

result = np.empty((K, K), dtype=np.float64)
mask = np.isfinite(mat).view(np.uint8)
Expand Down Expand Up @@ -531,7 +531,7 @@ def pad_2d_inplace(ndarray[algos_t, ndim=2] values,
algos_t val
int lim, fill_count = 0

K, N = (<object> values).shape
K, N = (<object>values).shape

# GH#2778
if N == 0:
Expand Down Expand Up @@ -730,7 +730,7 @@ def backfill_2d_inplace(ndarray[algos_t, ndim=2] values,
algos_t val
int lim, fill_count = 0

K, N = (<object> values).shape
K, N = (<object>values).shape

# GH#2778
if N == 0:
Expand Down
10 changes: 5 additions & 5 deletions pandas/_libs/algos_common_helper.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def diff_2d_{{name}}(ndarray[{{c_type}}, ndim=2] arr,
cdef:
Py_ssize_t i, j, sx, sy

sx, sy = (<object> arr).shape
sx, sy = (<object>arr).shape
if arr.flags.f_contiguous:
if axis == 0:
if periods >= 0:
Expand Down Expand Up @@ -88,14 +88,14 @@ def put2d_{{name}}_{{dest_name}}(ndarray[{{c_type}}, ndim=2, cast=True] values,
# ensure_dtype
#----------------------------------------------------------------------

cdef int PLATFORM_INT = (<ndarray> np.arange(0, dtype=np.intp)).descr.type_num
cdef int PLATFORM_INT = (<ndarray>np.arange(0, dtype=np.intp)).descr.type_num


def ensure_platform_int(object arr):
# GH3033, GH1392
# platform int is the size of the int pointer, e.g. np.intp
if util.is_array(arr):
if (<ndarray> arr).descr.type_num == PLATFORM_INT:
if (<ndarray>arr).descr.type_num == PLATFORM_INT:
return arr
else:
return arr.astype(np.intp)
Expand All @@ -105,7 +105,7 @@ def ensure_platform_int(object arr):

def ensure_object(object arr):
if util.is_array(arr):
if (<ndarray> arr).descr.type_num == NPY_OBJECT:
if (<ndarray>arr).descr.type_num == NPY_OBJECT:
return arr
else:
return arr.astype(np.object_)
Expand Down Expand Up @@ -142,7 +142,7 @@ def get_dispatch(dtypes):

def ensure_{{name}}(object arr, copy=True):
if util.is_array(arr):
if (<ndarray> arr).descr.type_num == NPY_{{c_type}}:
if (<ndarray>arr).descr.type_num == NPY_{{c_type}}:
return arr
else:
return arr.astype(np.{{dtype}}, copy=copy)
Expand Down
2 changes: 1 addition & 1 deletion pandas/_libs/algos_rank_helper.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -263,7 +263,7 @@ def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average',
np.putmask(values, mask, nan_value)
{{endif}}

n, k = (<object> values).shape
n, k = (<object>values).shape
ranks = np.empty((n, k), dtype='f8')

{{if dtype == 'object'}}
Expand Down
2 changes: 1 addition & 1 deletion pandas/_libs/algos_take_helper.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@ cdef _take_2d(ndarray[take_t, ndim=2] values, object idx):
ndarray[take_t, ndim=2] result
object val

N, K = (<object> values).shape
N, K = (<object>values).shape

if take_t is object:
# evaluated at compile-time
Expand Down
10 changes: 5 additions & 5 deletions pandas/_libs/groupby.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ from algos import take_2d_axis1_float64_float64, groupsort_indexer, tiebreakers

cdef int64_t iNaT = get_nat()

cdef double NaN = <double> np.NaN
cdef double NaN = <double>np.NaN
cdef double nan = NaN


Expand Down Expand Up @@ -115,7 +115,7 @@ def group_median_float64(ndarray[float64_t, ndim=2] out,
assert min_count == -1, "'min_count' only used in add and prod"

ngroups = len(counts)
N, K = (<object> values).shape
N, K = (<object>values).shape

indexer, _counts = groupsort_indexer(labels, ngroups)
counts[:] = _counts[1:]
Expand Down Expand Up @@ -152,7 +152,7 @@ def group_cumprod_float64(float64_t[:, :] out,
float64_t[:, :] accum
int64_t lab

N, K = (<object> values).shape
N, K = (<object>values).shape
accum = np.ones_like(values)

with nogil:
Expand Down Expand Up @@ -189,7 +189,7 @@ def group_cumsum(numeric[:, :] out,
numeric[:, :] accum
int64_t lab

N, K = (<object> values).shape
N, K = (<object>values).shape
accum = np.zeros_like(values)

with nogil:
Expand Down Expand Up @@ -226,7 +226,7 @@ def group_shift_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64)
int64_t[:, :] label_indexer

N, = (<object> labels).shape
N, = (<object>labels).shape

if periods < 0:
periods = -periods
Expand Down
32 changes: 16 additions & 16 deletions pandas/_libs/groupby_helper.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out,
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)

N, K = (<object> values).shape
N, K = (<object>values).shape

with nogil:

Expand Down Expand Up @@ -95,7 +95,7 @@ def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out,
nobs = np.zeros_like(out)
prodx = np.ones_like(out)

N, K = (<object> values).shape
N, K = (<object>values).shape

with nogil:
for i in range(N):
Expand Down Expand Up @@ -141,7 +141,7 @@ def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out,
nobs = np.zeros_like(out)
mean = np.zeros_like(out)

N, K = (<object> values).shape
N, K = (<object>values).shape

out[:, :] = 0.0

Expand Down Expand Up @@ -193,7 +193,7 @@ def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out,
nobs = np.zeros_like(out)
sumx = np.zeros_like(out)

N, K = (<object> values).shape
N, K = (<object>values).shape

with nogil:
for i in range(N):
Expand Down Expand Up @@ -238,7 +238,7 @@ def group_ohlc_{{name}}(ndarray[{{c_type}}, ndim=2] out,
if len(labels) == 0:
return

N, K = (<object> values).shape
N, K = (<object>values).shape

if out.shape[1] != 4:
raise ValueError('Output array must have 4 columns')
Expand Down Expand Up @@ -312,14 +312,14 @@ def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out,
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros((<object> out).shape, dtype=np.int64)
nobs = np.zeros((<object>out).shape, dtype=np.int64)
{{if name == 'object'}}
resx = np.empty((<object> out).shape, dtype=object)
resx = np.empty((<object>out).shape, dtype=object)
{{else}}
resx = np.empty_like(out)
{{endif}}

N, K = (<object> values).shape
N, K = (<object>values).shape

{{if name == "object"}}
if True: # make templating happy
Expand Down Expand Up @@ -369,14 +369,14 @@ def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out,
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros((<object> out).shape, dtype=np.int64)
nobs = np.zeros((<object>out).shape, dtype=np.int64)
{{if name=='object'}}
resx = np.empty((<object> out).shape, dtype=object)
resx = np.empty((<object>out).shape, dtype=object)
{{else}}
resx = np.empty_like(out)
{{endif}}

N, K = (<object> values).shape
N, K = (<object>values).shape

{{if name == "object"}}
if True: # make templating happy
Expand Down Expand Up @@ -462,7 +462,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,

tiebreak = tiebreakers[ties_method]
keep_na = na_option == 'keep'
N, K = (<object> values).shape
N, K = (<object>values).shape
grp_sizes = np.ones_like(out)

# Copy values into new array in order to fill missing data
Expand Down Expand Up @@ -635,7 +635,7 @@ def group_max(ndarray[groupby_t, ndim=2] out,
maxx.fill(-np.inf)
nan_val = NAN

N, K = (<object> values).shape
N, K = (<object>values).shape

with nogil:
for i in range(N):
Expand Down Expand Up @@ -697,7 +697,7 @@ def group_min(ndarray[groupby_t, ndim=2] out,
minx.fill(np.inf)
nan_val = NAN

N, K = (<object> values).shape
N, K = (<object>values).shape

with nogil:
for i in range(N):
Expand Down Expand Up @@ -744,7 +744,7 @@ def group_cummin(ndarray[groupby_t, ndim=2] out,
ndarray[groupby_t, ndim=2] accum
int64_t lab

N, K = (<object> values).shape
N, K = (<object>values).shape
accum = np.empty_like(values)
if groupby_t is int64_t:
accum.fill(_int64_max)
Expand Down Expand Up @@ -792,7 +792,7 @@ def group_cummax(ndarray[groupby_t, ndim=2] out,
ndarray[groupby_t, ndim=2] accum
int64_t lab

N, K = (<object> values).shape
N, K = (<object>values).shape
accum = np.empty_like(values)
if groupby_t is int64_t:
accum.fill(-_int64_max)
Expand Down
2 changes: 1 addition & 1 deletion pandas/_libs/hashtable_class_helper.pxi.in
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ cdef class {{name}}HashTable(HashTable):
for i in range(n):
key = keys[i]
k = kh_put_{{dtype}}(self.table, key, &ret)
self.table.vals[k] = <Py_ssize_t> values[i]
self.table.vals[k] = <Py_ssize_t>values[i]

@cython.boundscheck(False)
def map_locations(self, ndarray[{{dtype}}_t, ndim=1] values):
Expand Down
2 changes: 1 addition & 1 deletion pandas/_libs/join.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ from numpy cimport (ndarray,
cnp.import_array()


cdef double NaN = <double> np.NaN
cdef double NaN = <double>np.NaN
cdef double nan = NaN

from pandas._libs.algos import groupsort_indexer, ensure_platform_int
Expand Down
8 changes: 4 additions & 4 deletions pandas/_libs/lib.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -304,7 +304,7 @@ def fast_zip(list ndarrays):

# initialize tuples on first pass
arr = ndarrays[0]
it = <flatiter> PyArray_IterNew(arr)
it = <flatiter>PyArray_IterNew(arr)
for i in range(n):
val = PyArray_GETITEM(arr, PyArray_ITER_DATA(it))
tup = PyTuple_New(k)
Expand All @@ -316,7 +316,7 @@ def fast_zip(list ndarrays):

for j in range(1, k):
arr = ndarrays[j]
it = <flatiter> PyArray_IterNew(arr)
it = <flatiter>PyArray_IterNew(arr)
if len(arr) != n:
raise ValueError('all arrays must be same length')

Expand Down Expand Up @@ -1994,8 +1994,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
break
elif util.is_integer_object(val):
seen.int_ = 1
floats[i] = <float64_t> val
complexes[i] = <double complex> val
floats[i] = <float64_t>val
complexes[i] = <double complex>val
if not seen.null_:
seen.saw_int(int(val))

Expand Down
6 changes: 3 additions & 3 deletions pandas/_libs/missing.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ cimport util
from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value
from tslibs.nattype import NaT

cdef double INF = <double> np.inf
cdef double INF = <double>np.inf
cdef double NEGINF = -INF

cdef int64_t NPY_NAT = util.get_nat()
Expand Down Expand Up @@ -224,7 +224,7 @@ def isnaobj2d(ndarray arr):

assert arr.ndim == 2, "'arr' must be 2-D."

n, m = (<object> arr).shape
n, m = (<object>arr).shape
result = np.zeros((n, m), dtype=np.uint8)
for i in range(n):
for j in range(m):
Expand Down Expand Up @@ -268,7 +268,7 @@ def isnaobj2d_old(ndarray arr):

assert arr.ndim == 2, "'arr' must be 2-D."

n, m = (<object> arr).shape
n, m = (<object>arr).shape
result = np.zeros((n, m), dtype=np.uint8)
for i in range(n):
for j in range(m):
Expand Down
Loading