
PEP: use triple-double-quotes rather than triple-single-quotes in doc-strings #12994
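The change is purely stylistic: PEP 257 recommends triple double quotes ("""...""") around docstrings, and this PR converts the remaining '''-quoted docstrings accordingly. A minimal before/after sketch of the convention (hypothetical function, not taken from this diff):

```python
# Before: single-quoted docstring -- legal Python, but not what PEP 257 recommends.
def median_before(values):
    '''Return the median of a sequence.'''
    data = sorted(values)
    return data[len(data) // 2]

# After: the same docstring with triple double quotes, the style this PR applies.
def median_after(values):
    """Return the median of a sequence."""
    data = sorted(values)
    return data[len(data) // 2]
```

Both versions behave identically at runtime; the quoting only affects style-checker output and consistency across the codebase.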

Status: Closed (wants to merge 1 commit)
Changes from all commits
36 changes: 18 additions & 18 deletions pandas/algos.pyx
@@ -838,9 +838,9 @@ cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n):


cpdef numeric median(numeric[:] arr):
'''
"""
A faster median
'''
"""
cdef Py_ssize_t n = arr.size

if n == 0:
@@ -999,7 +999,7 @@ def roll_mean(ndarray[double_t] input,
# Exponentially weighted moving average

def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na, int minp):
'''
"""
Compute exponentially-weighted moving average using center-of-mass.

Parameters
@@ -1013,7 +1013,7 @@ def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na, int minp):
Returns
-------
y : ndarray
'''
"""

cdef Py_ssize_t N = len(input)
cdef ndarray[double_t] output = np.empty(N, dtype=float)
@@ -1061,7 +1061,7 @@ def ewma(ndarray[double_t] input, double_t com, int adjust, int ignore_na, int minp):

def ewmcov(ndarray[double_t] input_x, ndarray[double_t] input_y,
double_t com, int adjust, int ignore_na, int minp, int bias):
'''
"""
Compute exponentially-weighted moving variance using center-of-mass.

Parameters
@@ -1077,7 +1077,7 @@ def ewmcov(ndarray[double_t] input_x, ndarray[double_t] input_y,
Returns
-------
y : ndarray
'''
"""

cdef Py_ssize_t N = len(input_x)
if len(input_y) != N:
@@ -1761,9 +1761,9 @@ cdef _roll_min_max(ndarray[numeric] a, int window, int minp, bint is_max):

def roll_quantile(ndarray[float64_t, cast=True] input, int win,
int minp, double quantile):
'''
"""
O(N log(window)) implementation using skip list
'''
"""
cdef double val, prev, midpoint
cdef IndexableSkiplist skiplist
cdef Py_ssize_t nobs = 0, i
@@ -1997,12 +1997,12 @@ def groupby_indices(ndarray values):
@cython.wraparound(False)
@cython.boundscheck(False)
def group_labels(ndarray[object] values):
'''
"""
Compute label vector from input values and associated useful data

Returns
-------
'''
"""
cdef:
Py_ssize_t i, n = len(values)
ndarray[int64_t] labels = np.empty(n, dtype=np.int64)
@@ -2074,9 +2074,9 @@ def group_nth_object(ndarray[object, ndim=2] out,
ndarray[object, ndim=2] values,
ndarray[int64_t] labels,
int64_t rank):
'''
"""
Only aggregates on axis=0
'''
"""
cdef:
Py_ssize_t i, j, N, K, lab
object val
@@ -2117,9 +2117,9 @@ def group_nth_bin_object(ndarray[object, ndim=2] out,
ndarray[int64_t] counts,
ndarray[object, ndim=2] values,
ndarray[int64_t] bins, int64_t rank):
'''
"""
Only aggregates on axis=0
'''
"""
cdef:
Py_ssize_t i, j, N, K, ngroups, b
object val
@@ -2167,9 +2167,9 @@ def group_last_object(ndarray[object, ndim=2] out,
ndarray[int64_t] counts,
ndarray[object, ndim=2] values,
ndarray[int64_t] labels):
'''
"""
Only aggregates on axis=0
'''
"""
cdef:
Py_ssize_t i, j, N, K, lab
object val
@@ -2209,9 +2209,9 @@ def group_last_bin_object(ndarray[object, ndim=2] out,
ndarray[int64_t] counts,
ndarray[object, ndim=2] values,
ndarray[int64_t] bins):
'''
"""
Only aggregates on axis=0
'''
"""
cdef:
Py_ssize_t i, j, N, K, ngroups, b
object val
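The ewma and ewmcov docstrings above describe exponentially weighted statistics parameterised by a center of mass. As a rough illustration of the weighting (a plain-Python sketch assuming the usual alpha = 1/(1 + com) relation and adjust-style normalised weights, not the Cython routine in this diff):

```python
def ewma_sketch(values, com):
    """Exponentially weighted mean with weights (1 - alpha)**i, where alpha = 1/(1 + com)."""
    alpha = 1.0 / (1.0 + com)
    num = 0.0  # weighted sum of observations
    den = 0.0  # sum of weights
    out = []
    for x in values:
        # Decay previous contributions, then add the newest observation with weight 1.
        num = (1.0 - alpha) * num + x
        den = (1.0 - alpha) * den + 1.0
        out.append(num / den)
    return out

# ewma_sketch([1.0, 2.0, 3.0], com=1.0) -> [1.0, 1.666..., 2.428...]
```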
12 changes: 6 additions & 6 deletions pandas/compat/chainmap_impl.py
@@ -33,7 +33,7 @@ def wrapper(self):


class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
""" A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.

The underlying mappings are stored in a list. That list is public and can
@@ -43,13 +43,13 @@ class ChainMap(MutableMapping):
In contrast, writes, updates, and deletions only operate on the first
mapping.

'''
"""

def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
"""Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.

'''
"""
self.maps = list(maps) or [{}] # always at least one map

def __missing__(self, key):
@@ -101,10 +101,10 @@ def copy(self):
__copy__ = copy

def new_child(self, m=None): # like Django's Context.push()
'''
"""
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
"""
if m is None:
m = {}
return self.__class__(m, *self.maps)
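The ChainMap docstring above says that lookups search the underlying mappings in order, while writes, updates, and deletions only touch the first mapping. A short illustration (shown with collections.ChainMap, which the vendored pandas.compat implementation mirrors):

```python
from collections import ChainMap  # same behaviour as pandas.compat.chainmap_impl.ChainMap

defaults = {'color': 'red', 'user': 'guest'}
overrides = {'user': 'admin'}
cm = ChainMap(overrides, defaults)

print(cm['user'])    # 'admin' -- found in the first mapping
print(cm['color'])   # 'red'   -- falls through to the second mapping

cm['color'] = 'blue'             # writes go to the first mapping only
print(overrides)                 # {'user': 'admin', 'color': 'blue'}
print(defaults)                  # {'color': 'red', 'user': 'guest'} -- unchanged
```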
5 changes: 2 additions & 3 deletions pandas/core/groupby.py
@@ -1531,10 +1531,9 @@ def size(self):

@cache_readonly
def _max_groupsize(self):
'''
"""
Compute size of largest group

'''
"""
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
4 changes: 2 additions & 2 deletions pandas/core/series.py
@@ -1129,15 +1129,15 @@ def to_sparse(self, kind='block', fill_value=None):
fill_value=fill_value).__finalize__(self)

def _set_name(self, name, inplace=False):
'''
"""
Set the Series name.

Parameters
----------
name : str
inplace : bool
whether to modify `self` directly or return a copy
'''
"""
ser = self if inplace else self.copy()
ser.name = name
return ser
8 changes: 4 additions & 4 deletions pandas/index.pyx
@@ -101,9 +101,9 @@ cdef class IndexEngine:
return val in self.mapping

cpdef get_value(self, ndarray arr, object key, object tz=None):
'''
"""
arr : 1-dimensional ndarray
'''
"""
cdef:
object loc
void* data_ptr
@@ -119,9 +119,9 @@ cdef class IndexEngine:
return util.get_value_at(arr, loc)

cpdef set_value(self, ndarray arr, object key, object value):
'''
"""
arr : 1-dimensional ndarray
'''
"""
cdef:
object loc
void* data_ptr
4 changes: 2 additions & 2 deletions pandas/indexes/base.py
@@ -2569,10 +2569,10 @@ def _join_level(self, other, level, how='left', return_indexers=False,
from .multi import MultiIndex

def _get_leaf_sorter(labels):
'''
"""
returns sorter for the inner most level while preserving the
order of higher levels
'''
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')

2 changes: 1 addition & 1 deletion pandas/indexes/multi.py
@@ -1528,7 +1528,7 @@ def get_loc(self, key, method=None):
'currently supported for MultiIndex')

def _maybe_to_slice(loc):
'''convert integer indexer to boolean mask or slice if possible'''
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc

4 changes: 0 additions & 4 deletions pandas/io/tests/test_sql.py
@@ -2385,8 +2385,6 @@ def test_uquery(self):
sys.stdout = sys.__stdout__

def test_keyword_as_column_names(self):
'''
'''
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords', index=False)

@@ -2751,8 +2749,6 @@ def test_uquery(self):
sys.stdout = sys.__stdout__

def test_keyword_as_column_names(self):
'''
'''
_skip_if_no_pymysql()
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords',
8 changes: 4 additions & 4 deletions pandas/io/wb.py
@@ -228,8 +228,8 @@ def _get_data(indicator="NY.GNS.ICTR.GN.ZS", country='US',
return out,"Success"

def get_countries():
'''Query information about countries
'''
"""Query information about countries
"""
url = 'http://api.worldbank.org/countries/?per_page=1000&format=json'
with urlopen(url) as response:
data = response.read()
@@ -243,8 +243,8 @@ def get_countries():
return data

def get_indicators():
'''Download information about all World Bank data series
'''
"""Download information about all World Bank data series
"""
url = 'http://api.worldbank.org/indicators?per_page=50000&format=json'
with urlopen(url) as response:
data = response.read()
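Both helpers above fetch a JSON document from the World Bank API listed in their url variables. A hedged usage sketch (module path as it appears in this diff; in later pandas releases this functionality moved to the pandas-datareader package):

```python
from pandas.io import wb  # location at the time of this PR

countries = wb.get_countries()    # metadata for all countries, one row each
indicators = wb.get_indicators()  # catalogue of available World Bank series

# Assuming the usual DataFrame return type:
print(countries.head())
print(indicators.head())
```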
28 changes: 14 additions & 14 deletions pandas/lib.pyx
@@ -95,15 +95,15 @@ def values_from_object(object o):
return o

cpdef map_indices_list(list index):
'''
"""
Produce a dict mapping the values of the input array to their respective
locations.

Example:
array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}

Better to do this with Cython because of the enormous speed boost.
'''
"""
cdef Py_ssize_t i, length
cdef dict result = {}

@@ -134,7 +134,7 @@ def ismember_nans(float64_t[:] arr, set values, bint hasnans):


def ismember(ndarray arr, set values):
'''
"""
Checks whether

Parameters
@@ -145,7 +145,7 @@ def ismember(ndarray arr, set values):
Returns
-------
ismember : ndarray (boolean dtype)
'''
"""
cdef:
Py_ssize_t i, n
ndarray[uint8_t] result
Expand All @@ -160,7 +160,7 @@ def ismember(ndarray arr, set values):
return result.view(np.bool_)

def ismember_int64(ndarray[int64_t] arr, set values):
'''
"""
Checks whether

Parameters
@@ -171,7 +171,7 @@ def ismember_int64(ndarray[int64_t] arr, set values):
Returns
-------
ismember : ndarray (boolean dtype)
'''
"""
cdef:
Py_ssize_t i, n
ndarray[uint8_t] result
@@ -404,10 +404,10 @@ def isnullobj2d_old(ndarray[object, ndim=2] arr):
@cython.wraparound(False)
@cython.boundscheck(False)
cpdef ndarray[object] list_to_object_array(list obj):
'''
"""
Convert list to object ndarray. Seriously can\'t believe I had to write this
function
'''
"""
cdef:
Py_ssize_t i, n = len(obj)
ndarray[object] arr = np.empty(n, dtype=object)
@@ -542,9 +542,9 @@ def dicts_to_array(list dicts, list columns):
return result

def fast_zip(list ndarrays):
'''
"""
For zipping multiple ndarrays into an ndarray of tuples
'''
"""
cdef:
Py_ssize_t i, j, k, n
ndarray[object] result
@@ -959,9 +959,9 @@ cpdef ndarray[object] astype_str(ndarray arr):
return result

def clean_index_list(list obj):
'''
"""
Utility used in pandas.core.index._ensure_index
'''
"""
cdef:
ndarray[object] converted
Py_ssize_t i, n = len(obj)
@@ -1325,9 +1325,9 @@ cdef class _PandasNull:
pandas_null = _PandasNull()

def fast_zip_fillna(list ndarrays, fill_value=pandas_null):
'''
"""
For zipping multiple ndarrays into an ndarray of tuples
'''
"""
cdef:
Py_ssize_t i, j, k, n
ndarray[object] result
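fast_zip and fast_zip_fillna above turn several parallel ndarrays into a single object ndarray of tuples. Conceptually they do the following (a slow pure-NumPy sketch, not the Cython implementation):

```python
import numpy as np

def fast_zip_sketch(ndarrays):
    """Zip same-length 1-D arrays into one object ndarray of tuples."""
    n = len(ndarrays[0])
    out = np.empty(n, dtype=object)
    for i in range(n):
        out[i] = tuple(arr[i] for arr in ndarrays)
    return out

# fast_zip_sketch([np.array([1, 2]), np.array(['a', 'b'])])
# -> array([(1, 'a'), (2, 'b')], dtype=object)
```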
4 changes: 2 additions & 2 deletions pandas/parser.pyx
@@ -248,11 +248,11 @@ _NA_VALUES = [b'-1.#IND', b'1.#QNAN', b'1.#IND', b'-1.#QNAN',


cdef class TextReader:
'''
"""

# source: StringIO or file object

'''
"""

cdef:
parser_t *parser