
Commit ed6b3b6

Merge remote-tracking branch 'upstream/master' into sa-errors
2 parents: 778c2c1 + f20331d

197 files changed: +5338 -4312 lines


asv_bench/benchmarks/frame_ctor.py

+45 lines changed

@@ -1,5 +1,6 @@
 import numpy as np
 
+import pandas as pd
 from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range
 
 from .pandas_vb_common import tm
@@ -118,4 +119,48 @@ def time_frame_from_range(self):
         self.df = DataFrame(self.data)
 
 
+class FromArrays:
+
+    goal_time = 0.2
+
+    def setup(self):
+        N_rows = 1000
+        N_cols = 1000
+        self.float_arrays = [np.random.randn(N_rows) for _ in range(N_cols)]
+        self.sparse_arrays = [
+            pd.arrays.SparseArray(np.random.randint(0, 2, N_rows), dtype="float64")
+            for _ in range(N_cols)
+        ]
+        self.int_arrays = [
+            pd.array(np.random.randint(1000, size=N_rows), dtype="Int64")
+            for _ in range(N_cols)
+        ]
+        self.index = pd.Index(range(N_rows))
+        self.columns = pd.Index(range(N_cols))
+
+    def time_frame_from_arrays_float(self):
+        self.df = DataFrame._from_arrays(
+            self.float_arrays,
+            index=self.index,
+            columns=self.columns,
+            verify_integrity=False,
+        )
+
+    def time_frame_from_arrays_int(self):
+        self.df = DataFrame._from_arrays(
+            self.int_arrays,
+            index=self.index,
+            columns=self.columns,
+            verify_integrity=False,
+        )
+
+    def time_frame_from_arrays_sparse(self):
+        self.df = DataFrame._from_arrays(
+            self.sparse_arrays,
+            index=self.index,
+            columns=self.columns,
+            verify_integrity=False,
+        )
+
+
 from .pandas_vb_common import setup  # noqa: F401 isort:skip
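
For orientation, a minimal standalone sketch of what the new benchmarks above exercise; it assumes a pandas build that still ships the private DataFrame._from_arrays constructor shown in the diff, and the sizes are illustrative rather than the benchmark's:

    import numpy as np
    import pandas as pd
    from pandas import DataFrame

    n_rows, n_cols = 1000, 100
    arrays = [np.random.randn(n_rows) for _ in range(n_cols)]

    # Skipping the integrity checks is the fast path the benchmark measures.
    df = DataFrame._from_arrays(
        arrays,
        index=pd.Index(range(n_rows)),
        columns=pd.Index(range(n_cols)),
        verify_integrity=False,
    )
    print(df.shape)  # (1000, 100)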

asv_bench/benchmarks/rolling.py

+4 -4 lines changed

@@ -11,7 +11,7 @@ class Methods:
         ["int", "float"],
         ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
     )
-    param_names = ["contructor", "window", "dtype", "method"]
+    param_names = ["constructor", "window", "dtype", "method"]
 
     def setup(self, constructor, window, dtype, method):
         N = 10 ** 5
@@ -72,7 +72,7 @@ class ExpandingMethods:
         ["int", "float"],
         ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
     )
-    param_names = ["contructor", "window", "dtype", "method"]
+    param_names = ["constructor", "window", "dtype", "method"]
 
     def setup(self, constructor, dtype, method):
         N = 10 ** 5
@@ -86,7 +86,7 @@ def time_expanding(self, constructor, dtype, method):
 class EWMMethods:
 
     params = (["DataFrame", "Series"], [10, 1000], ["int", "float"], ["mean", "std"])
-    param_names = ["contructor", "window", "dtype", "method"]
+    param_names = ["constructor", "window", "dtype", "method"]
 
     def setup(self, constructor, window, dtype, method):
         N = 10 ** 5
@@ -104,7 +104,7 @@ class VariableWindowMethods(Methods):
         ["int", "float"],
         ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum"],
     )
-    param_names = ["contructor", "window", "dtype", "method"]
+    param_names = ["constructor", "window", "dtype", "method"]
 
     def setup(self, constructor, window, dtype, method):
         N = 10 ** 5
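
The rename above is cosmetic: in asv, param_names entries are display labels that pair positionally with params and with the arguments passed to setup and the time_* methods. A trimmed sketch of that pattern (a hypothetical benchmark, not one from this file):

    import numpy as np
    import pandas as pd

    class RollingMean:
        # Each label in param_names describes the matching entry in params.
        params = (["DataFrame", "Series"], [10, 1000], ["int", "float"])
        param_names = ["constructor", "window", "dtype"]

        def setup(self, constructor, window, dtype):
            arr = (np.random.random(10 ** 5) * 100).astype(dtype)
            self.roll = getattr(pd, constructor)(arr).rolling(window)

        def time_mean(self, constructor, window, dtype):
            self.roll.mean()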

asv_bench/benchmarks/sparse.py

-1 line changed

@@ -45,7 +45,6 @@ def time_sparse_array(self, dense_proportion, fill_value, dtype):
 class SparseDataFrameConstructor:
     def setup(self):
         N = 1000
-        self.arr = np.arange(N)
         self.sparse = scipy.sparse.rand(N, N, 0.005)
 
     def time_from_scipy(self):
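
For context, a plausible usage sketch of the constructor this benchmark feeds, via the public DataFrame.sparse.from_spmatrix entry point called out in the v1.1.0 release notes later in this commit (requires SciPy; sizes here are illustrative):

    import scipy.sparse
    import pandas as pd

    mat = scipy.sparse.rand(1000, 10, density=0.005, format="coo")
    df = pd.DataFrame.sparse.from_spmatrix(mat)
    print(df.dtypes.iloc[0])   # a SparseDtype column
    print(df.sparse.density)   # roughly 0.005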

ci/azure/posix.yml

+1 -1 lines changed

@@ -24,7 +24,7 @@ jobs:
       ENV_FILE: ci/deps/azure-36-locale_slow.yaml
       CONDA_PY: "36"
       PATTERN: "slow"
-      # pandas does not use the language (zh_CN), but should support diferent encodings (utf8)
+      # pandas does not use the language (zh_CN), but should support different encodings (utf8)
       # we should test with encodings different than utf8, but doesn't seem like Ubuntu supports any
       LANG: "zh_CN.utf8"
       LC_ALL: "zh_CN.utf8"

ci/code_checks.sh

+10 -2 lines changed

@@ -102,9 +102,17 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
 
     MSG='Check for use of not concatenated strings' ; echo $MSG
     if [[ "$GITHUB_ACTIONS" == "true" ]]; then
-        $BASE_DIR/scripts/validate_string_concatenation.py --format="[error]{source_path}:{line_number}:{msg}" .
+        $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="strings_to_concatenate" --format="##[error]{source_path}:{line_number}:{msg}" .
     else
-        $BASE_DIR/scripts/validate_string_concatenation.py .
+        $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="strings_to_concatenate" .
+    fi
+    RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+    MSG='Check for strings with wrong placed spaces' ; echo $MSG
+    if [[ "$GITHUB_ACTIONS" == "true" ]]; then
+        $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="strings_with_wrong_placed_whitespace" --format="##[error]{source_path}:{line_number}:{msg}" .
+    else
+        $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="strings_with_wrong_placed_whitespace" .
     fi
     RET=$(($RET + $?)) ; echo $MSG "DONE"
 

doc/source/conf.py

+1 -1 lines changed

@@ -195,7 +195,7 @@
 
 # The theme to use for HTML and HTML Help pages. Major themes that come with
 # Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = "pandas_sphinx_theme"
+html_theme = "pydata_sphinx_theme"
 
 # The style sheet to use for HTML and HTML Help pages. A file of that name
 # must exist either in Sphinx' static/ path, or in one of the custom paths

doc/source/development/internals.rst

+2 -8 lines changed

@@ -89,16 +89,10 @@ pandas extends NumPy's type system with custom types, like ``Categorical`` or
 datetimes with a timezone, so we have multiple notions of "values". For 1-D
 containers (``Index`` classes and ``Series``) we have the following convention:
 
-* ``cls._ndarray_values`` is *always* a NumPy ``ndarray``. Ideally,
-  ``_ndarray_values`` is cheap to compute. For example, for a ``Categorical``,
-  this returns the codes, not the array of objects.
 * ``cls._values`` refers is the "best possible" array. This could be an
-  ``ndarray``, ``ExtensionArray``, or in ``Index`` subclass (note: we're in the
-  process of removing the index subclasses here so that it's always an
-  ``ndarray`` or ``ExtensionArray``).
+  ``ndarray`` or ``ExtensionArray``.
 
-So, for example, ``Series[category]._values`` is a ``Categorical``, while
-``Series[category]._ndarray_values`` is the underlying codes.
+So, for example, ``Series[category]._values`` is a ``Categorical``.
 
 .. _ref-subclassing-pandas:
 
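
A quick illustration of the convention this change keeps (a small sketch; _values is internal API and subject to change):

    import pandas as pd

    s = pd.Series(["a", "b", "a"], dtype="category")
    print(type(s._values))     # pandas Categorical: the "best possible" array
    print(type(s.to_numpy()))  # a plain NumPy ndarray for NumPy consumers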

doc/source/getting_started/intro_tutorials/02_read_write.rst

+1 -1 lines changed

@@ -225,7 +225,7 @@ The method :meth:`~DataFrame.info` provides technical information about a
     <div class="d-flex flex-row bg-light gs-torefguide">
         <span class="badge badge-info">To user guide</span>
 
-For a complete overview of the input and output possibilites from and to pandas, see the user guide section about :ref:`reader and writer functions <io>`.
+For a complete overview of the input and output possibilities from and to pandas, see the user guide section about :ref:`reader and writer functions <io>`.
 
 .. raw:: html
 

doc/source/getting_started/intro_tutorials/03_subset_data.rst

+2 -2 lines changed

@@ -88,7 +88,7 @@ name of the column of interest.
     </ul>
 
 Each column in a :class:`DataFrame` is a :class:`Series`. As a single column is
-selected, the returned object is a pandas :class:`DataFrame`. We can verify this
+selected, the returned object is a pandas :class:`Series`. We can verify this
 by checking the type of the output:
 
 .. ipython:: python
@@ -101,7 +101,7 @@ And have a look at the ``shape`` of the output:
 
     titanic["Age"].shape
 
-:attr:`DataFrame.shape` is an attribute (remember :ref:`tutorial on reading and writing <10min_tut_02_read_write>`, do not use parantheses for attributes) of a
+:attr:`DataFrame.shape` is an attribute (remember :ref:`tutorial on reading and writing <10min_tut_02_read_write>`, do not use parentheses for attributes) of a
 pandas ``Series`` and ``DataFrame`` containing the number of rows and
 columns: *(nrows, ncolumns)*. A pandas Series is 1-dimensional and only
 the number of rows is returned.
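
A toy version of the corrected statement, with a small made-up frame standing in for the tutorial's titanic.csv:

    import pandas as pd

    titanic = pd.DataFrame({"Age": [22, 35, 58], "Sex": ["male", "male", "female"]})
    print(type(titanic["Age"]))  # <class 'pandas.core.series.Series'>, not a DataFrame
    print(titanic["Age"].shape)  # (3,)   attribute access, no parentheses
    print(titanic.shape)         # (3, 2) i.e. (nrows, ncolumns)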

doc/source/getting_started/intro_tutorials/08_combine_dataframes.rst

+1 -1 lines changed

@@ -165,7 +165,7 @@ index. For example:
 .. note::
     The existence of multiple row/column indices at the same time
     has not been mentioned within these tutorials. *Hierarchical indexing*
-    or *MultiIndex* is an advanced and powerfull pandas feature to analyze
+    or *MultiIndex* is an advanced and powerful pandas feature to analyze
     higher dimensional data.
 
     Multi-indexing is out of scope for this pandas introduction. For the

doc/source/getting_started/intro_tutorials/10_text_data.rst

+1 -1 lines changed

@@ -188,7 +188,7 @@ Which passenger of the titanic has the longest name?
 
     titanic["Name"].str.len()
 
-To get the longest name we first have to get the lenghts of each of the
+To get the longest name we first have to get the lengths of each of the
 names in the ``Name`` column. By using pandas string methods, the
 :meth:`Series.str.len` function is applied to each of the names individually
 (element-wise).
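
A self-contained rehearsal of that exercise, with two made-up names in place of the tutorial dataset:

    import pandas as pd

    names = pd.Series(
        ["Braund, Mr. Owen Harris", "Allen, Miss. Elisabeth Walton"], name="Name"
    )
    print(names.str.len())                      # element-wise lengths
    print(names.loc[names.str.len().idxmax()])  # the longest name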

doc/source/index.rst.template

-1 line changed

@@ -119,7 +119,6 @@ programming language.
    :titlesonly:
 {% endif %}
 {% if not single_doc %}
-   What's New in 1.1.0 <whatsnew/v1.1.0>
    getting_started/index
    user_guide/index
 {% endif -%}

doc/source/reference/extensions.rst

-1 line changed

@@ -37,7 +37,6 @@ objects.
    api.extensions.ExtensionArray._from_factorized
    api.extensions.ExtensionArray._from_sequence
    api.extensions.ExtensionArray._from_sequence_of_strings
-   api.extensions.ExtensionArray._ndarray_values
    api.extensions.ExtensionArray._reduce
    api.extensions.ExtensionArray._values_for_argsort
    api.extensions.ExtensionArray._values_for_factorize

doc/source/user_guide/dsintro.rst

+1 -1 lines changed

@@ -406,7 +406,7 @@ From a list of dataclasses
 
 Data Classes as introduced in `PEP557 <https://www.python.org/dev/peps/pep-0557>`__,
 can be passed into the DataFrame constructor.
-Passing a list of dataclasses is equivilent to passing a list of dictionaries.
+Passing a list of dataclasses is equivalent to passing a list of dictionaries.
 
 Please be aware, that that all values in the list should be dataclasses, mixing
 types in the list would result in a TypeError.
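
A short sketch of the equivalence the corrected sentence describes (requires a pandas release with dataclass support in the DataFrame constructor; the Point class is made up for illustration):

    from dataclasses import dataclass

    import pandas as pd

    @dataclass
    class Point:
        x: int
        y: int

    # The same frame either way: a list of dataclasses or a list of dicts.
    df_from_dataclasses = pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
    df_from_dicts = pd.DataFrame([{"x": 0, "y": 0}, {"x": 0, "y": 3}, {"x": 2, "y": 3}])
    print(df_from_dataclasses.equals(df_from_dicts))  # True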

doc/source/user_guide/indexing.rst

+1 -1 lines changed

@@ -374,7 +374,7 @@ For getting values with a boolean array:
     df1.loc['a'] > 0
     df1.loc[:, df1.loc['a'] > 0]
 
-NA values in a boolean array propogate as ``False``:
+NA values in a boolean array propagate as ``False``:
 
 .. versionchanged:: 1.0.2
 
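
A minimal sketch of the corrected sentence, matching the versionchanged:: 1.0.2 note that follows it (assumes pandas >= 1.0.2 and the nullable boolean dtype):

    import pandas as pd

    s = pd.Series([1, 2, 3], index=["a", "b", "c"])
    mask = pd.array([True, pd.NA, False], dtype="boolean")
    # The NA position is treated as False, so only "a" is selected.
    print(s.loc[mask])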

doc/source/user_guide/io.rst

+1 -1 lines changed

@@ -5005,7 +5005,7 @@ Possible values are:
   This usually provides better performance for analytic databases
   like *Presto* and *Redshift*, but has worse performance for
   traditional SQL backend if the table contains many columns.
-  For more information check the SQLAlchemy `documention
+  For more information check the SQLAlchemy `documentation
   <https://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.Insert.values.params.*args>`__.
 - callable with signature ``(pd_table, conn, keys, data_iter)``:
   This can be used to implement a more performant insertion method based on
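
A hedged usage sketch of the method option this passage documents, using an in-memory SQLite database through SQLAlchemy (the table name is made up):

    import pandas as pd
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")  # in-memory SQLite
    df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

    # method="multi" sends all rows in a single multi-values INSERT clause.
    df.to_sql("demo", engine, index=False, method="multi")
    print(pd.read_sql("SELECT * FROM demo", engine))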

doc/source/user_guide/scale.rst

+2 -1 lines changed

@@ -246,6 +246,7 @@ We'll import ``dask.dataframe`` and notice that the API feels similar to pandas.
 We can use Dask's ``read_parquet`` function, but provide a globstring of files to read in.
 
 .. ipython:: python
+   :okwarning:
 
    import dask.dataframe as dd
 
@@ -258,7 +259,7 @@ Inspecting the ``ddf`` object, we see a few things
 * There are familiar methods like ``.groupby``, ``.sum``, etc.
 * There are new attributes like ``.npartitions`` and ``.divisions``
 
-The partitions and divisions are how Dask parallizes computation. A **Dask**
+The partitions and divisions are how Dask parallelizes computation. A **Dask**
 DataFrame is made up of many **Pandas** DataFrames. A single method call on a
 Dask DataFrame ends up making many pandas method calls, and Dask knows how to
 coordinate everything to get the result.
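
A hedged sketch of the Dask workflow the corrected sentence describes (assumes dask[dataframe] is installed; the glob and column names are illustrative, not taken from the docs build):

    import dask.dataframe as dd

    ddf = dd.read_parquet("data/timeseries/*.parquet")
    print(ddf.npartitions)  # one pandas DataFrame per partition
    print(ddf.divisions)    # index boundaries Dask uses to split the work

    # A familiar pandas-style chain; .compute() triggers the parallel execution.
    result = ddf.groupby("name")["x"].mean().compute()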

doc/source/user_guide/style.ipynb

+2 -2 lines changed

@@ -620,8 +620,8 @@
     "aligns = ['left','zero','mid']\n",
    "for align in aligns:\n",
    "    row = \"<tr><th>{}</th>\".format(align)\n",
-    "    for serie in [test1,test2,test3]:\n",
-    "        s = serie.copy()\n",
+    "    for series in [test1,test2,test3]:\n",
+    "        s = series.copy()\n",
    "        s.name=''\n",
    "        row += \"<td>{}</td>\".format(s.to_frame().style.bar(align=align, \n",
    "                                                            color=['#d65f5f', '#5fba7d'], \n",

doc/source/whatsnew/v0.24.0.rst

+1 -1 lines changed

@@ -397,7 +397,7 @@ Other enhancements
 - :func:`~DataFrame.to_csv`, :func:`~Series.to_csv`, :func:`~DataFrame.to_json`, and :func:`~Series.to_json` now support ``compression='infer'`` to infer compression based on filename extension (:issue:`15008`).
   The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`).
 - :meth:`DataFrame.to_sql` now supports writing ``TIMESTAMP WITH TIME ZONE`` types for supported databases. For databases that don't support timezones, datetime data will be stored as timezone unaware local timestamps. See the :ref:`io.sql_datetime_data` for implications (:issue:`9086`).
-- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`)
+- :func:`to_timedelta` now supports iso-formatted timedelta strings (:issue:`21877`)
 - :class:`Series` and :class:`DataFrame` now support :class:`Iterable` objects in the constructor (:issue:`2193`)
 - :class:`DatetimeIndex` has gained the :attr:`DatetimeIndex.timetz` attribute. This returns the local time with timezone information. (:issue:`21358`)
 - :meth:`~Timestamp.round`, :meth:`~Timestamp.ceil`, and :meth:`~Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp`

doc/source/whatsnew/v1.0.1.rst

+1 -1 lines changed

@@ -16,7 +16,7 @@ Fixed regressions
 ~~~~~~~~~~~~~~~~~
 
 - Fixed regression in :class:`DataFrame` setting values with a slice (e.g. ``df[-4:] = 1``) indexing by label instead of position (:issue:`31469`)
-- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containg a :class:`datetime.date` (:issue:`31501`)
+- Fixed regression when indexing a ``Series`` or ``DataFrame`` indexed by ``DatetimeIndex`` with a slice containing a :class:`datetime.date` (:issue:`31501`)
 - Fixed regression in ``DataFrame.__setitem__`` raising an ``AttributeError`` with a :class:`MultiIndex` and a non-monotonic indexer (:issue:`31449`)
 - Fixed regression in :class:`Series` multiplication when multiplying a numeric :class:`Series` with >10000 elements with a timedelta-like scalar (:issue:`31457`)
 - Fixed regression in ``.groupby().agg()`` raising an ``AssertionError`` for some reductions like ``min`` on object-dtype columns (:issue:`31522`)

doc/source/whatsnew/v1.1.0.rst

+12 -2 lines changed

@@ -224,6 +224,10 @@ Performance improvements
 - The internal index method :meth:`~Index._shallow_copy` now copies cached attributes over to the new index,
   avoiding creating these again on the new index. This can speed up many operations that depend on creating copies of
   existing indexes (:issue:`28584`, :issue:`32640`, :issue:`32669`)
+- Significant performance improvement when creating a :class:`DataFrame` with
+  sparse values from ``scipy.sparse`` matrices using the
+  :meth:`DataFrame.sparse.from_spmatrix` constructor (:issue:`32821`,
+  :issue:`32825`, :issue:`32826`, :issue:`32856`, :issue:`32858`).
 
 .. ---------------------------------------------------------------------------
 
@@ -303,6 +307,7 @@ Indexing
 - Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` when indexing with an integer key on a object-dtype :class:`Index` that is not all-integers (:issue:`31905`)
 - Bug in :meth:`DataFrame.iloc.__setitem__` on a :class:`DataFrame` with duplicate columns incorrectly setting values for all matching columns (:issue:`15686`, :issue:`22036`)
 - Bug in :meth:`DataFrame.loc:` and :meth:`Series.loc` with a :class:`DatetimeIndex`, :class:`TimedeltaIndex`, or :class:`PeriodIndex` incorrectly allowing lookups of non-matching datetime-like dtypes (:issue:`32650`)
+- Bug in :meth:`Series.__getitem__` indexing with non-standard scalars, e.g. ``np.dtype`` (:issue:`32684`)
 
 Missing
 ^^^^^^^
@@ -334,18 +339,21 @@ MultiIndex
 
 I/O
 ^^^
-- Bug in :meth:`read_json` where integer overflow was occuring when json contains big number strings. (:issue:`30320`)
+- Bug in :meth:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`)
 - `read_csv` will now raise a ``ValueError`` when the arguments `header` and `prefix` both are not `None`. (:issue:`27394`)
 - Bug in :meth:`DataFrame.to_json` was raising ``NotFoundError`` when ``path_or_buf`` was an S3 URI (:issue:`28375`)
 - Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for
   ``coerce_timestamps``; following pyarrow's default allows writing nanosecond
   timestamps with ``version="2.0"`` (:issue:`31652`).
 - Bug in :meth:`read_csv` was raising `TypeError` when `sep=None` was used in combination with `comment` keyword (:issue:`31396`)
 - Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`)
+- Bug in :meth:`DataFrame.to_json` where ``Timedelta`` objects would not be serialized correctly with ``date_format="iso"`` (:issue:`28256`)
 - :func:`read_csv` will raise a ``ValueError`` when the column names passed in `parse_dates` are missing in the Dataframe (:issue:`31251`)
 - Bug in :meth:`read_excel` where a UTF-8 string with a high surrogate would cause a segmentation violation (:issue:`23809`)
 - Bug in :meth:`read_csv` was causing a file descriptor leak on an empty file (:issue:`31488`)
 - Bug in :meth:`read_csv` was causing a segfault when there were blank lines between the header and data rows (:issue:`28071`)
+- Bug in :meth:`read_csv` was raising a misleading exception on a permissions issue (:issue:`23784`)
+- Bug in :meth:`read_csv` was raising an ``IndexError`` when header=None and 2 extra data columns
 
 
 Plotting
@@ -377,7 +385,7 @@ Reshaping
 - :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. Previously the ``replace`` would fail silently (:issue:`18634`)
 - Bug in :meth:`DataFrame.apply` where callback was called with :class:`Series` parameter even though ``raw=True`` requested. (:issue:`32423`)
 - Bug in :meth:`DataFrame.pivot_table` losing timezone information when creating a :class:`MultiIndex` level from a column with timezone-aware dtype (:issue:`32558`)
-
+- :meth:`DataFrame.agg` now provides more descriptive ``SpecificationError`` message when attempting to aggregating non-existant column (:issue:`32755`)
 
 Sparse
 ^^^^^^
@@ -402,6 +410,8 @@ Other
 - Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if left object is a different subclass with ``check_series_type=True`` (:issue:`32670`).
 - :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:32538`)
 - Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`)
+- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)
+- Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`)
 
 .. ---------------------------------------------------------------------------

doc/sphinxext/announce.py

+15 -2 lines changed

@@ -68,8 +68,21 @@ def get_authors(revision_range):
         revision_range = f"{lst_release}..{cur_release}"
 
     # authors, in current release and previous to current release.
-    cur = set(re.findall(pat, this_repo.git.shortlog("-s", revision_range), re.M))
-    pre = set(re.findall(pat, this_repo.git.shortlog("-s", lst_release), re.M))
+    # We need two passes over the log for cur and prev, one to get the
+    # "Co-authored by" commits, which come from backports by the bot,
+    # and one for regular commits.
+    xpr = re.compile(r"Co-authored-by: (?P<name>[^<]+) ")
+    cur = set(
+        xpr.findall(
+            this_repo.git.log("--grep=Co-authored", "--pretty=%b", revision_range)
+        )
+    )
+    cur |= set(re.findall(pat, this_repo.git.shortlog("-s", revision_range), re.M))
+
+    pre = set(
+        xpr.findall(this_repo.git.log("--grep=Co-authored", "--pretty=%b", lst_release))
+    )
+    pre |= set(re.findall(pat, this_repo.git.shortlog("-s", lst_release), re.M))
 
     # Homu is the author of auto merges, clean him out.
     cur.discard("Homu")
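
For intuition, a small self-check of the co-author pattern introduced above, run on a made-up commit body rather than real git output:

    import re

    xpr = re.compile(r"Co-authored-by: (?P<name>[^<]+) ")
    body = (
        "Backport of an upstream fix (example text, not a real commit)\n\n"
        "Co-authored-by: Jane Doe <jane@example.com>"
    )
    print(xpr.findall(body))  # ['Jane Doe']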
