Commit 87a1d31

Merge remote-tracking branch 'upstream/master' into concat-empty-ea

2 parents: bd316c3 + 9223d19

File tree: 599 files changed, +26769 / -17453 lines


.travis.yml  (+21 / -3)

@@ -14,6 +14,8 @@ cache:
 
 env:
   global:
+    # Variable for test workers
+    - PYTEST_WORKERS="auto"
     # create a github personal access token
     # cd pandas-dev/pandas
     # travis encrypt 'PANDAS_GH_TOKEN=personal_access_token' -r pandas-dev/pandas
@@ -27,12 +29,21 @@ matrix:
   fast_finish: true
 
   include:
+    # In allowed failures
+    - dist: bionic
+      python: 3.9-dev
+      env:
+        - JOB="3.9-dev" PATTERN="(not slow and not network and not clipboard)"
     - env:
         - JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network and not clipboard)"
 
     - env:
         - JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network and not clipboard)"
 
+    - arch: arm64
+      env:
+        - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard)"
+
     - env:
         - JOB="3.6, locale" ENV_FILE="ci/deps/travis-36-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1"
       services:
@@ -53,11 +64,18 @@ matrix:
       services:
         - mysql
         - postgresql
+  allow_failures:
+    - arch: arm64
+      env:
+        - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard)"
+    - dist: bionic
+      python: 3.9-dev
+      env:
+        - JOB="3.9-dev" PATTERN="(not slow and not network)"
 
 before_install:
   - echo "before_install"
-  # set non-blocking IO on travis
-  # https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024
+  # Use blocking IO on travis. Ref: https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024
   - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
   - source ci/travis_process_gbq_encryption.sh
   - export PATH="$HOME/miniconda3/bin:$PATH"
@@ -83,7 +101,7 @@ install:
 script:
   - echo "script start"
   - echo "$JOB"
-  - source activate pandas-dev
+  - if [ "$JOB" != "3.9-dev" ]; then source activate pandas-dev; fi
   - ci/run_tests.sh
 
 after_script:

README.md  (+1)

@@ -16,6 +16,7 @@
 [![Downloads](https://anaconda.org/conda-forge/pandas/badges/downloads.svg)](https://pandas.pydata.org)
 [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/pydata/pandas)
 [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
 
 ## What is it?
 

asv_bench/benchmarks/algorithms.py  (+14 / -3)

@@ -34,7 +34,16 @@ class Factorize:
     params = [
         [True, False],
         [True, False],
-        ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"],
+        [
+            "int",
+            "uint",
+            "float",
+            "string",
+            "datetime64[ns]",
+            "datetime64[ns, tz]",
+            "Int64",
+            "boolean",
+        ],
     ]
     param_names = ["unique", "sort", "dtype"]
 
@@ -49,13 +58,15 @@ def setup(self, unique, sort, dtype):
             "datetime64[ns, tz]": pd.date_range(
                 "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo"
             ),
+            "Int64": pd.array(np.arange(N), dtype="Int64"),
+            "boolean": pd.array(np.random.randint(0, 2, N), dtype="boolean"),
         }[dtype]
         if not unique:
             data = data.repeat(5)
-        self.idx = data
+        self.data = data
 
     def time_factorize(self, unique, sort, dtype):
-        self.idx.factorize(sort=sort)
+        pd.factorize(self.data, sort=sort)
 
 
 class Duplicated:
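Note: the Factorize benchmark above now also covers the nullable "Int64" and "boolean" extension dtypes and calls the module-level pd.factorize rather than the .factorize method. A minimal sketch of that pattern (array size and values here are illustrative, not the benchmark's own):

import numpy as np
import pandas as pd

# Nullable extension arrays like those added to the benchmark's dtype matrix
int_arr = pd.array(np.arange(10), dtype="Int64")
bool_arr = pd.array(np.random.randint(0, 2, 10), dtype="boolean")

# pd.factorize accepts ExtensionArrays as well as ndarrays/Series and
# returns integer codes plus the array of unique values.
codes, uniques = pd.factorize(int_arr, sort=True)
codes_b, uniques_b = pd.factorize(bool_arr, sort=True)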

asv_bench/benchmarks/arithmetic.py  (+85 / -3)

@@ -67,7 +67,7 @@ def time_series_op_with_fill_value_no_nas(self):
         self.ser.add(self.ser, fill_value=4)
 
 
-class MixedFrameWithSeriesAxis0:
+class MixedFrameWithSeriesAxis:
     params = [
         [
             "eq",
@@ -78,7 +78,7 @@ class MixedFrameWithSeriesAxis0:
             "gt",
             "add",
             "sub",
-            "div",
+            "truediv",
             "floordiv",
             "mul",
             "pow",
@@ -87,15 +87,72 @@ class MixedFrameWithSeriesAxis0:
     param_names = ["opname"]
 
     def setup(self, opname):
-        arr = np.arange(10 ** 6).reshape(100, -1)
+        arr = np.arange(10 ** 6).reshape(1000, -1)
         df = DataFrame(arr)
         df["C"] = 1.0
         self.df = df
         self.ser = df[0]
+        self.row = df.iloc[0]
 
     def time_frame_op_with_series_axis0(self, opname):
         getattr(self.df, opname)(self.ser, axis=0)
 
+    def time_frame_op_with_series_axis1(self, opname):
+        getattr(operator, opname)(self.df, self.ser)
+
+
+class FrameWithFrameWide:
+    # Many-columns, mixed dtypes
+
+    params = [
+        [
+            # GH#32779 has discussion of which operators are included here
+            operator.add,
+            operator.floordiv,
+            operator.gt,
+        ]
+    ]
+    param_names = ["op"]
+
+    def setup(self, op):
+        # we choose dtypes so as to make the blocks
+        #  a) not perfectly match between right and left
+        #  b) appreciably bigger than single columns
+        n_cols = 2000
+        n_rows = 500
+
+        # construct dataframe with 2 blocks
+        arr1 = np.random.randn(n_rows, int(n_cols / 2)).astype("f8")
+        arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("f4")
+        df = pd.concat(
+            [pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True,
+        )
+        # should already be the case, but just to be sure
+        df._consolidate_inplace()
+
+        # TODO: GH#33198 the setting here shoudlnt need two steps
+        arr1 = np.random.randn(n_rows, int(n_cols / 4)).astype("f8")
+        arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("i8")
+        arr3 = np.random.randn(n_rows, int(n_cols / 4)).astype("f8")
+        df2 = pd.concat(
+            [pd.DataFrame(arr1), pd.DataFrame(arr2), pd.DataFrame(arr3)],
+            axis=1,
+            ignore_index=True,
+        )
+        # should already be the case, but just to be sure
+        df2._consolidate_inplace()
+
+        self.left = df
+        self.right = df2
+
+    def time_op_different_blocks(self, op):
+        # blocks (and dtypes) are not aligned
+        op(self.left, self.right)
+
+    def time_op_same_blocks(self, op):
+        # blocks (and dtypes) are aligned
+        op(self.left, self.left)
+
 
 class Ops:
 
@@ -412,4 +469,29 @@ def time_apply_index(self, offset):
         offset.apply_index(self.rng)
 
 
+class BinaryOpsMultiIndex:
+    params = ["sub", "add", "mul", "div"]
+    param_names = ["func"]
+
+    def setup(self, func):
+        date_range = pd.date_range("20200101 00:00", "20200102 0:00", freq="S")
+        level_0_names = [str(i) for i in range(30)]
+
+        index = pd.MultiIndex.from_product([level_0_names, date_range])
+        column_names = ["col_1", "col_2"]
+
+        self.df = pd.DataFrame(
+            np.random.rand(len(index), 2), index=index, columns=column_names
+        )
+
+        self.arg_df = pd.DataFrame(
+            np.random.randint(1, 10, (len(level_0_names), 2)),
+            index=level_0_names,
+            columns=column_names,
+        )
+
+    def time_binary_op_multiindex(self, func):
+        getattr(self.df, func)(self.arg_df, level=0)
+
+
 from .pandas_vb_common import setup  # noqa: F401 isort:skip
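Note: the new BinaryOpsMultiIndex benchmark above times flex arithmetic where the argument frame aligns on a single level of a MultiIndex. A small sketch of that pattern, using much smaller illustrative shapes than the benchmark:

import numpy as np
import pandas as pd

# Two-level index: an outer group label and an inner timestamp
outer = ["a", "b"]
inner = pd.date_range("2020-01-01", periods=3, freq="D")
idx = pd.MultiIndex.from_product([outer, inner])

df = pd.DataFrame(np.random.rand(len(idx), 2), index=idx, columns=["col_1", "col_2"])

# One row of arguments per outer label, indexed by that label only
arg = pd.DataFrame(
    np.random.randint(1, 10, (len(outer), 2)), index=outer, columns=["col_1", "col_2"]
)

# level=0 broadcasts each row of `arg` across the matching outer label of `df`
result = df.sub(arg, level=0)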

asv_bench/benchmarks/frame_methods.py  (+1 / -1)

@@ -564,7 +564,7 @@ def setup(self):
 
     def time_frame_get_dtype_counts(self):
         with warnings.catch_warnings(record=True):
-            self.df._data.get_dtype_counts()
+            self.df.dtypes.value_counts()
 
     def time_info(self):
         self.df.info()
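Note: the benchmark above now counts columns per dtype through the public API; df.dtypes.value_counts() reports roughly what the removed internal _data.get_dtype_counts() did. For illustration (the frame below is made up, not the benchmark's setup):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5], "c": ["x", "y"]})

# One count per dtype present among the frame's columns
print(df.dtypes.value_counts())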

asv_bench/benchmarks/groupby.py  (+92)

@@ -626,4 +626,96 @@ def time_first(self):
         self.df_nans.groupby("key").transform("first")
 
 
+class TransformEngine:
+    def setup(self):
+        N = 10 ** 3
+        data = DataFrame(
+            {0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
+            columns=[0, 1],
+        )
+        self.grouper = data.groupby(0)
+
+    def time_series_numba(self):
+        def function(values, index):
+            return values * 5
+
+        self.grouper[1].transform(function, engine="numba")
+
+    def time_series_cython(self):
+        def function(values):
+            return values * 5
+
+        self.grouper[1].transform(function, engine="cython")
+
+    def time_dataframe_numba(self):
+        def function(values, index):
+            return values * 5
+
+        self.grouper.transform(function, engine="numba")
+
+    def time_dataframe_cython(self):
+        def function(values):
+            return values * 5
+
+        self.grouper.transform(function, engine="cython")
+
+
+class AggEngine:
+    def setup(self):
+        N = 10 ** 3
+        data = DataFrame(
+            {0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
+            columns=[0, 1],
+        )
+        self.grouper = data.groupby(0)
+
+    def time_series_numba(self):
+        def function(values, index):
+            total = 0
+            for i, value in enumerate(values):
+                if i % 2:
+                    total += value + 5
+                else:
+                    total += value * 2
+            return total
+
+        self.grouper[1].agg(function, engine="numba")
+
+    def time_series_cython(self):
+        def function(values):
+            total = 0
+            for i, value in enumerate(values):
+                if i % 2:
+                    total += value + 5
+                else:
+                    total += value * 2
+            return total
+
+        self.grouper[1].agg(function, engine="cython")
+
+    def time_dataframe_numba(self):
+        def function(values, index):
+            total = 0
+            for i, value in enumerate(values):
+                if i % 2:
+                    total += value + 5
+                else:
+                    total += value * 2
+            return total
+
+        self.grouper.agg(function, engine="numba")
+
+    def time_dataframe_cython(self):
+        def function(values):
+            total = 0
+            for i, value in enumerate(values):
+                if i % 2:
+                    total += value + 5
+                else:
+                    total += value * 2
+            return total
+
+        self.grouper.agg(function, engine="cython")
+
+
 from .pandas_vb_common import setup  # noqa: F401 isort:skip
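Note: the TransformEngine and AggEngine benchmarks above compare the Cython and Numba execution engines for groupby.transform and groupby.agg. A minimal usage sketch, assuming numba is installed (the data and the user-defined function are illustrative):

import pandas as pd

df = pd.DataFrame({"key": ["a", "b"] * 50, "value": range(100)})
grouped = df.groupby("key")["value"]

# Default Cython path: the UDF receives each group's values
cython_result = grouped.transform(lambda values: values * 5, engine="cython")

# Numba path: the UDF must take (values, index) and be numba-compilable
def times_five(values, index):
    return values * 5

numba_result = grouped.transform(times_five, engine="numba")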

asv_bench/benchmarks/io/parsers.py  (+2 / -2)

@@ -2,7 +2,7 @@
 
 try:
     from pandas._libs.tslibs.parsing import (
-        _concat_date_cols,
+        concat_date_cols,
         _does_string_look_like_datetime,
     )
 except ImportError:
@@ -39,4 +39,4 @@ def setup(self, value, dim):
         )
 
     def time_check_concat(self, value, dim):
-        _concat_date_cols(self.object)
+        concat_date_cols(self.object)
