diff --git a/.travis.yml b/.travis.yml
index e5e05ed26da56..f0ece15de65db 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -123,7 +123,7 @@ after_success:
 
 after_script:
   - echo "after_script start"
-  - source activate pandas && python -c "import pandas; pandas.show_versions();"
+  - source activate pandas && cd /tmp && python -c "import pandas; pandas.show_versions();"
   - if [ -e /tmp/single.xml ]; then
       ci/print_skipped.py /tmp/single.xml;
     fi
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 601edded29f5a..8cf6f2ce636da 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -119,15 +119,7 @@ if [ "$COVERAGE" ]; then
 fi
 echo
 
-if [ "$BUILD_TEST" ]; then
-
-    # build & install testing
-    echo ["Starting installation test."]
-    bash ci/install_release_build.sh
-    conda uninstall -y cython
-    time pip install dist/*tar.gz || exit 1
-
-else
+if [ -z "$BUILD_TEST" ]; then
 
     # build but don't install
     echo "[build em]"
@@ -163,9 +155,22 @@ fi
 # w/o removing anything else
 echo
 echo "[removing installed pandas]"
-conda remove pandas --force
+conda remove pandas -y --force
 
-if [ -z "$BUILD_TEST" ]; then
+if [ "$BUILD_TEST" ]; then
+
+    # remove any installation
+    pip uninstall -y pandas
+    conda list pandas
+    pip list --format columns |grep pandas
+
+    # build & install testing
+    echo ["building release"]
+    bash scripts/build_dist_for_release.sh
+    conda uninstall -y cython
+    time pip install dist/*tar.gz || exit 1
+
+else
 
     # install our pandas
     echo
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index 663d2feb5be23..daa929e177666 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -19,20 +19,26 @@ export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
 echo PYTHONHASHSEED=$PYTHONHASHSEED
 
 if [ "$BUILD_TEST" ]; then
-    echo "build-test"
+    echo "[build-test]"
+
+    echo "[env]"
+    pip list --format columns |grep pandas
+
+    echo "[running]"
     cd /tmp
-    pwd
-    conda list pandas
-    echo "running"
-    python -c "import pandas; pandas.test(['-n 2'])"
+    unset PYTHONPATH
+    python -c "import pandas; pandas.test(['-n 2', '--skip-slow', '--skip-network', '-r xX'])"
+
 elif [ "$DOC" ]; then
     echo "We are not running pytest as this is a doc-build"
+
 elif [ "$COVERAGE" ]; then
     echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
     pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+
 else
-    echo pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
-    pytest -n 2 -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas # TODO: doctest
+    echo pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+    pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas # TODO: doctest
 fi
 
 RET="$?"
diff --git a/ci/script_single.sh b/ci/script_single.sh
index db637679f0e0f..245b4e6152c4d 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -20,8 +20,8 @@ elif [ "$COVERAGE" ]; then
     echo pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
     pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
 else
-    echo pytest -m "single" --junitxml=/tmp/single.xml $TEST_ARGS pandas
-    pytest -m "single" --junitxml=/tmp/single.xml $TEST_ARGS pandas # TODO: doctest
+    echo pytest -m "single" -r xX --junitxml=/tmp/single.xml $TEST_ARGS pandas
+    pytest -m "single" -r xX --junitxml=/tmp/single.xml $TEST_ARGS pandas # TODO: doctest
 fi
 
 RET="$?"
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 2baedb82aa2a7..12976272cb8b1 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -4,7 +4,28 @@
 import pytest
 import numpy as np  # noqa
 from pandas import DataFrame
+from pandas.compat import PY36
 from pandas.util import testing as tm
+import importlib
+
+
+def import_module(name):
+    # we *only* want to skip if the module is truly not available
+    # and NOT just an actual import error because of pandas changes
+
+    if PY36:
+        try:
+            return importlib.import_module(name)
+        except ModuleNotFoundError:  # noqa
+            pytest.skip("skipping as {} not available".format(name))
+
+    else:
+        try:
+            return importlib.import_module(name)
+        except ImportError as e:
+            if "No module named" in str(e) and name in str(e):
+                pytest.skip("skipping as {} not available".format(name))
+            raise
 
 
 @pytest.fixture
@@ -14,8 +35,8 @@ def df():
 
 def test_dask(df):
 
-    toolz = pytest.importorskip('toolz')  # noqa
-    dask = pytest.importorskip('dask')  # noqa
+    toolz = import_module('toolz')  # noqa
+    dask = import_module('dask')  # noqa
 
     import dask.dataframe as dd
 
@@ -26,14 +47,14 @@ def test_dask(df):
 
 def test_xarray(df):
 
-    xarray = pytest.importorskip('xarray')  # noqa
+    xarray = import_module('xarray')  # noqa
 
     assert df.to_xarray() is not None
 
 
 def test_statsmodels():
 
-    statsmodels = pytest.importorskip('statsmodels')  # noqa
+    statsmodels = import_module('statsmodels')  # noqa
     import statsmodels.api as sm
     import statsmodels.formula.api as smf
     df = sm.datasets.get_rdataset("Guerry", "HistData").data
@@ -42,7 +63,7 @@ def test_statsmodels():
 
 def test_scikit_learn(df):
 
-    sklearn = pytest.importorskip('sklearn')  # noqa
+    sklearn = import_module('sklearn')  # noqa
     from sklearn import svm, datasets
 
     digits = datasets.load_digits()
@@ -53,33 +74,34 @@ def test_scikit_learn(df):
 
 def test_seaborn():
 
-    seaborn = pytest.importorskip('seaborn')
+    seaborn = import_module('seaborn')
     tips = seaborn.load_dataset("tips")
     seaborn.stripplot(x="day", y="total_bill", data=tips)
 
 
 def test_pandas_gbq(df):
 
-    pandas_gbq = pytest.importorskip('pandas-gbq')  # noqa
+    pandas_gbq = import_module('pandas_gbq')  # noqa
 
 
-@tm.network
+@pytest.mark.xfail(reason=("pandas_datareader<=0.3.0 "
+                           "broken w.r.t. pandas >= 0.20.0"))
 def test_pandas_datareader():
 
-    pandas_datareader = pytest.importorskip('pandas-datareader')  # noqa
+    pandas_datareader = import_module('pandas_datareader')  # noqa
     pandas_datareader.get_data_yahoo('AAPL')
 
 
 def test_geopandas():
 
-    geopandas = pytest.importorskip('geopandas')  # noqa
+    geopandas = import_module('geopandas')  # noqa
     fp = geopandas.datasets.get_path('naturalearth_lowres')
     assert geopandas.read_file(fp) is not None
 
 
 def test_pyarrow(df):
 
-    pyarrow = pytest.importorskip('pyarrow')  # noqa
+    pyarrow = import_module('pyarrow')  # noqa
     table = pyarrow.Table.from_pandas(df)
     result = table.to_pandas()
     tm.assert_frame_equal(result, df)
diff --git a/scripts/build_dist.sh b/scripts/build_dist.sh
index d6a7d0ba67239..c3f849ce7a6eb 100755
--- a/scripts/build_dist.sh
+++ b/scripts/build_dist.sh
@@ -10,11 +10,7 @@ read -p "Ok to continue (y/n)? " answer
 case ${answer:0:1} in
     y|Y )
         echo "Building distribution"
-        rm -rf dist
-        git clean -xfd
-        python setup.py clean
-        python setup.py cython
-        python setup.py sdist --formats=gztar
+        ./build_dist_for_release.sh
         ;;
     * )
         echo "Not building distribution"
diff --git a/ci/install_release_build.sh b/scripts/build_dist_for_release.sh
similarity index 69%
rename from ci/install_release_build.sh
rename to scripts/build_dist_for_release.sh
index f8373176643fa..e77974ae08b0c 100644
--- a/ci/install_release_build.sh
+++ b/scripts/build_dist_for_release.sh
@@ -2,7 +2,7 @@
 
 # this requires cython to be installed
 
-# this builds the release cleanly
+# this builds the release cleanly & is building on the current checkout
 rm -rf dist
 git clean -xfd
 python setup.py clean