Commit c21235b

garanews authored and datapythonista committed
DOC: Misc typos fixed in docs and code comments (#28785)
1 parent 181c8aa commit c21235b

17 files changed (+29 -29 lines)

ci/setup_env.sh (+2 -2)

@@ -123,8 +123,8 @@ conda list pandas
 echo "[Build extensions]"
 python setup.py build_ext -q -i

-# XXX: Some of our environments end up with old verisons of pip (10.x)
-# Adding a new enough verison of pip to the requirements explodes the
+# XXX: Some of our environments end up with old versions of pip (10.x)
+# Adding a new enough version of pip to the requirements explodes the
 # solve time. Just using pip to update itself.
 # - py35_macos
 # - py35_compat

doc/source/whatsnew/v0.25.0.rst (+3 -3)

@@ -797,7 +797,7 @@ The columns were lexicographically sorted previously,

 The column order now matches the insertion-order of the keys in the ``dict``,
 considering all the records from top to bottom. As a consequence, the column
-order of the resulting DataFrame has changed compared to previous pandas verisons.
+order of the resulting DataFrame has changed compared to previous pandas versions.

 .. ipython:: python
@@ -886,7 +886,7 @@ Other API changes
 - Using an unsupported version of Beautiful Soup 4 will now raise an ``ImportError`` instead of a ``ValueError`` (:issue:`27063`)
 - :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` will now raise a ``ValueError`` when saving timezone aware data. (:issue:`27008`, :issue:`7056`)
 - :meth:`ExtensionArray.argsort` places NA values at the end of the sorted array. (:issue:`21801`)
-- :meth:`DataFrame.to_hdf` and :meth:`Series.to_hdf` will now raise a ``NotImplementedError`` when saving a :class:`MultiIndex` with extention data types for a ``fixed`` format. (:issue:`7775`)
+- :meth:`DataFrame.to_hdf` and :meth:`Series.to_hdf` will now raise a ``NotImplementedError`` when saving a :class:`MultiIndex` with extension data types for a ``fixed`` format. (:issue:`7775`)
 - Passing duplicate ``names`` in :meth:`read_csv` will now raise a ``ValueError`` (:issue:`17346`)

 .. _whatsnew_0250.deprecations:
@@ -1106,7 +1106,7 @@ Indexing

 - Improved exception message when calling :meth:`DataFrame.iloc` with a list of non-numeric objects (:issue:`25753`).
 - Improved exception message when calling ``.iloc`` or ``.loc`` with a boolean indexer with different length (:issue:`26658`).
-- Bug in ``KeyError`` exception message when indexing a :class:`MultiIndex` with a non-existant key not displaying the original key (:issue:`27250`).
+- Bug in ``KeyError`` exception message when indexing a :class:`MultiIndex` with a non-existent key not displaying the original key (:issue:`27250`).
 - Bug in ``.iloc`` and ``.loc`` with a boolean indexer not raising an ``IndexError`` when too few items are passed (:issue:`26658`).
 - Bug in :meth:`DataFrame.loc` and :meth:`Series.loc` where ``KeyError`` was not raised for a ``MultiIndex`` when the key was less than or equal to the number of levels in the :class:`MultiIndex` (:issue:`14885`).
 - Bug in which :meth:`DataFrame.append` produced an erroneous warning indicating that a ``KeyError`` will be thrown in the future when the data to be appended contains new columns (:issue:`22252`).
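Note: one of the API changes kept as context in the second hunk is the ``read_csv`` duplicate-``names`` check; a minimal sketch of what it implies (file contents and column names invented for illustration, pandas 0.25 or later assumed) would be:

    import io
    import pandas as pd

    # Duplicate entries in `names` now raise instead of silently creating
    # duplicate columns (GH 17346).
    try:
        pd.read_csv(io.StringIO("1,2\n3,4"), names=["a", "a"])
    except ValueError as err:
        print(err)  # e.g. "Duplicate names are not allowed."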

pandas/_libs/tslibs/parsing.pyx (+4 -4)

@@ -92,7 +92,7 @@ cdef inline object _parse_delimited_date(object date_string, bint dayfirst):
     At the beginning function tries to parse date in MM/DD/YYYY format, but
     if month > 12 - in DD/MM/YYYY (`dayfirst == False`).
     With `dayfirst == True` function makes an attempt to parse date in
-    DD/MM/YYYY, if an attemp is wrong - in DD/MM/YYYY
+    DD/MM/YYYY, if an attempt is wrong - in DD/MM/YYYY

     Note
     ----
@@ -730,7 +730,7 @@ class _timelex:
         stream = self.stream.replace('\x00', '')

         # TODO: Change \s --> \s+ (this doesn't match existing behavior)
-        # TODO: change the punctuation block to punc+ (doesnt match existing)
+        # TODO: change the punctuation block to punc+ (does not match existing)
         # TODO: can we merge the two digit patterns?
         tokens = re.findall('\s|'
                             '(?<![\.\d])\d+\.\d+(?![\.\d])'
@@ -987,12 +987,12 @@ def _concat_date_cols(tuple date_cols, bint keep_trivial_numbers=True):
                                               keep_trivial_numbers)
             PyArray_ITER_NEXT(it)
     else:
-        # create fixed size list - more effecient memory allocation
+        # create fixed size list - more efficient memory allocation
         list_to_join = [None] * col_count
         iters = np.zeros(col_count, dtype=object)

         # create memoryview of iters ndarray, that will contain some
-        # flatiter's for each array in `date_cols` - more effecient indexing
+        # flatiter's for each array in `date_cols` - more efficient indexing
         iters_view = iters
         for col_idx, array in enumerate(date_cols):
             iters_view[col_idx] = PyArray_IterNew(array)
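Note: for readers unfamiliar with the ``dayfirst`` behaviour the corrected docstring describes, a small illustration at the public ``to_datetime`` level (date strings chosen arbitrarily) is:

    import pandas as pd

    # Month-first by default; dayfirst=True swaps the interpretation.
    print(pd.to_datetime("02/01/2019"))                 # 2019-02-01 (MM/DD/YYYY)
    print(pd.to_datetime("02/01/2019", dayfirst=True))  # 2019-01-02 (DD/MM/YYYY)
    print(pd.to_datetime("13/01/2019"))                 # 2019-01-13 (month > 12, falls back to day-first)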

pandas/core/arrays/categorical.py (+1 -1)

@@ -338,7 +338,7 @@ def __init__(
             )
         # At this point, dtype is always a CategoricalDtype, but
         # we may have dtype.categories be None, and we need to
-        # infer categories in a factorization step futher below
+        # infer categories in a factorization step further below

         if fastpath:
             self._codes = coerce_indexer_dtype(values, dtype.categories)

pandas/core/arrays/datetimelike.py (+1 -1)

@@ -1441,7 +1441,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs):
         values = self.asi8

         if not len(values):
-            # short-circut for empty max / min
+            # short-circuit for empty max / min
             return NaT

         result = nanops.nanmax(values, skipna=skipna)

pandas/core/frame.py (+1 -1)

@@ -1271,7 +1271,7 @@ def to_numpy(self, dtype=None, copy=False):
         array([[1, 3],
                [2, 4]])

-        With heterogenous data, the lowest common type will have to
+        With heterogeneous data, the lowest common type will have to
         be used.

         >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
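Note: the docstring context above leads into a mixed-dtype example; the point it makes can be sketched as (values arbitrary):

    import pandas as pd

    df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
    # int64 and float64 columns share float64 as their lowest common type,
    # so the whole array comes back as float64.
    arr = df.to_numpy()
    print(arr.dtype)  # float64
    print(arr)        # [[1.  3. ]
                      #  [2.  4.5]]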

pandas/core/indexes/base.py (+1 -1)

@@ -4417,7 +4417,7 @@ def asof_locs(self, where, mask):
         every entry in the `where` argument.

         As in the `asof` function, if the label (a particular entry in
-        `where`) is not in the index, the latest index label upto the
+        `where`) is not in the index, the latest index label up to the
         passed label is chosen and its index returned.

         If all of the labels in the index are later than a label in `where`,

pandas/core/indexes/interval.py (+1 -1)

@@ -747,7 +747,7 @@ def _maybe_convert_i8(self, key):
         Returns
         -------
         key: scalar or list-like
-            The original key if no conversion occured, int if converted scalar,
+            The original key if no conversion occurred, int if converted scalar,
             Int64Index if converted list-like.
         """
         original = key

pandas/core/ops/missing.py (+1 -1)

@@ -1,7 +1,7 @@
 """
 Missing data handling for arithmetic operations.

-In particular, pandas conventions regarding divison by zero differ
+In particular, pandas conventions regarding division by zero differ
 from numpy in the following ways:
     1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2)
        gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for
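Note: a rough sketch of the pandas-side convention this module docstring contrasts with numpy (exact printed formatting aside):

    import pandas as pd

    s = pd.Series([-1, 0, 1])
    # pandas maps x / 0 to -inf, nan, +inf according to the sign of x,
    # instead of numpy's dtype-dependent results for integer division by zero.
    print(s / 0)   # -inf, NaN, inf
    print(s // 0)  # -inf, NaN, inf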

pandas/io/json/_normalize.py (+1 -1)

@@ -183,7 +183,7 @@ def json_normalize(
     1 {'height': 130, 'weight': 60} NaN Mose Reg
     2 {'height': 130, 'weight': 60} 2.0 Faye Raker

-    Normalizes nested data upto level 1.
+    Normalizes nested data up to level 1.

     >>> data = [{'id': 1,
     ... 'name': "Cole Volk",
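Note: the corrected sentence introduces the ``max_level`` example in this docstring; a minimal sketch (data invented, and assuming the ``max_level`` argument available from pandas 0.25 onwards) might look like:

    from pandas.io.json import json_normalize

    data = [{"id": 1, "name": "Cole Volk",
             "fitness": {"height": 130, "weight": 60}}]
    # With max_level=1 the nested 'fitness' dicts are flattened one level deep,
    # producing 'fitness.height' and 'fitness.weight' columns.
    print(json_normalize(data, max_level=1))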

pandas/io/pickle.py (+1 -1)

@@ -140,7 +140,7 @@ def read_pickle(path, compression="infer"):
     path = _stringify_path(path)
     f, fh = _get_handle(path, "rb", compression=compression, is_text=False)

-    # 1) try standard libary Pickle
+    # 1) try standard library Pickle
     # 2) try pickle_compat (older pandas version) to handle subclass changes
     # 3) try pickle_compat with latin1 encoding

pandas/io/stata.py (+1 -1)

@@ -2730,7 +2730,7 @@ def generate_table(self):
         Modifies the DataFrame in-place.

         The DataFrame returned encodes the (v,o) values as uint64s. The
-        encoding depends on teh dta version, and can be expressed as
+        encoding depends on the dta version, and can be expressed as

           enc = v + o * 2 ** (o_size * 8)
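Note: the formula kept as context above packs a (v, o) pair into one integer; a small round-trip sketch (the concrete v, o and o_size values are invented) shows the idea:

    # Pack per the docstring formula: o lands in the high bits, v in the low
    # o_size * 8 bits (assuming v fits there).
    v, o, o_size = 23, 7, 6
    enc = v + o * 2 ** (o_size * 8)

    # Unpack again by masking and shifting.
    assert enc & (2 ** (o_size * 8) - 1) == v
    assert enc >> (o_size * 8) == o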

pandas/tests/frame/test_timezones.py (+1 -1)

@@ -37,7 +37,7 @@ def test_frame_values_with_tz(self):
         expected = np.concatenate([expected, expected], axis=1)
         tm.assert_numpy_array_equal(result, expected)

-        # three columns, heterogenous
+        # three columns, heterogeneous
         est = "US/Eastern"
         df = df.assign(C=df.A.dt.tz_convert(est))

pandas/tests/indexes/multi/test_copy.py (+1 -1)

@@ -74,7 +74,7 @@ def test_copy_method(deep):
 @pytest.mark.parametrize(
     "kwarg, value",
     [
-        ("names", ["thrid", "fourth"]),
+        ("names", ["third", "fourth"]),
         ("levels", [["foo2", "bar2"], ["fizz2", "buzz2"]]),
         ("codes", [[1, 0, 0, 0], [1, 1, 0, 0]]),
     ],

pandas/tests/io/formats/test_format.py (+1 -1)

@@ -54,7 +54,7 @@ def filepath_or_buffer_id(request):
 @pytest.fixture
 def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
     """
-    A fixture yeilding a string representing a filepath, a path-like object
+    A fixture yielding a string representing a filepath, a path-like object
     and a StringIO buffer. Also checks that buffer is not closed.
     """
     if filepath_or_buffer_id == "buffer":

pandas/tests/io/json/test_normalize.py (+2 -2)

@@ -554,7 +554,7 @@ def test_donot_drop_nonevalues(self):

     def test_nonetype_top_level_bottom_level(self):
         # GH21158: If inner level json has a key with a null value
-        # make sure it doesnt do a new_d.pop twice and except
+        # make sure it does not do a new_d.pop twice and except
         data = {
             "id": None,
             "location": {
@@ -586,7 +586,7 @@ def test_nonetype_top_level_bottom_level(self):

     def test_nonetype_multiple_levels(self):
         # GH21158: If inner level json has a key with a null value
-        # make sure it doesnt do a new_d.pop twice and except
+        # make sure it does not do a new_d.pop twice and except
         data = {
             "id": None,
             "location": {

pandas/tests/tseries/offsets/test_offsets.py (+6 -6)

@@ -670,7 +670,7 @@ def setup_method(self, method):
         self.offset2 = BDay(2)

     def test_different_normalize_equals(self):
-        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+        # GH#21404 changed __eq__ to return False when `normalize` does not match
         offset = self._offset()
         offset2 = self._offset(normalize=True)
         assert offset != offset2
@@ -912,7 +912,7 @@ def test_constructor_errors(self, start, end, match):
             BusinessHour(start=start, end=end)

     def test_different_normalize_equals(self):
-        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+        # GH#21404 changed __eq__ to return False when `normalize` does not match
         offset = self._offset()
         offset2 = self._offset(normalize=True)
         assert offset != offset2
@@ -2278,7 +2278,7 @@ def test_constructor_errors(self):
             CustomBusinessHour(start="14:00:05")

     def test_different_normalize_equals(self):
-        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+        # GH#21404 changed __eq__ to return False when `normalize` does not match
         offset = self._offset()
         offset2 = self._offset(normalize=True)
         assert offset != offset2
@@ -2556,7 +2556,7 @@ def setup_method(self, method):
         self.offset2 = CDay(2)

     def test_different_normalize_equals(self):
-        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+        # GH#21404 changed __eq__ to return False when `normalize` does not match
         offset = self._offset()
         offset2 = self._offset(normalize=True)
         assert offset != offset2
@@ -2827,7 +2827,7 @@ class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
     _offset = CBMonthEnd

     def test_different_normalize_equals(self):
-        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+        # GH#21404 changed __eq__ to return False when `normalize` does not match
         offset = self._offset()
         offset2 = self._offset(normalize=True)
         assert offset != offset2
@@ -2976,7 +2976,7 @@ class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
     _offset = CBMonthBegin

     def test_different_normalize_equals(self):
-        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
+        # GH#21404 changed __eq__ to return False when `normalize` does not match
         offset = self._offset()
         offset2 = self._offset(normalize=True)
         assert offset != offset2
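Note: all six touched comments refer to the same GH#21404 change; what the surrounding tests assert can be sketched with any one offset class, for example:

    from pandas.tseries.offsets import BDay

    # Offsets differing only in `normalize` no longer compare equal.
    assert BDay() != BDay(normalize=True)
    assert BDay() == BDay()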
