diff --git a/.travis.yml b/.travis.yml
index 6bbc44fba864a..03026647d6bb8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -52,7 +52,7 @@ matrix:
       - python-gtk2
   - dist: trusty
     env:
-      - JOB="3.6, lint, coverage" ENV_FILE="ci/deps/travis-36.yaml" PATTERN="not slow and not network" PANDAS_TESTING_MODE="deprecate" COVERAGE=true LINT=true
+      - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36.yaml" PATTERN="not slow and not network" PANDAS_TESTING_MODE="deprecate" COVERAGE=true
   - dist: trusty
     env:
       - JOB="3.7, NumPy dev" ENV_FILE="ci/deps/travis-37-numpydev.yaml" PATTERN="not slow and not network" TEST_ARGS="-W error" PANDAS_TESTING_MODE="deprecate"
@@ -108,7 +108,6 @@ script:
   - source activate pandas-dev
   - ci/run_build_docs.sh
   - ci/run_tests.sh
-  - ci/code_checks.sh

 after_script:
   - echo "after_script start"
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 373c22fdf8e62..a58f82ec6de49 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -23,3 +23,104 @@ jobs:
     parameters:
       name: WindowsPy27
       vmImage: vs2017-win2016
+
+- job: 'Checks_and_doc'
+  pool:
+    vmImage: ubuntu-16.04
+  timeoutInMinutes: 90
+  steps:
+  - script: |
+      # XXX next command should avoid redefining the path in every step, but
+      # made the process crash as it couldn't find deactivate
+      #echo '##vso[task.prependpath]$HOME/miniconda3/bin'
+      echo '##vso[task.setvariable variable=CONDA_ENV]pandas-dev'
+      echo '##vso[task.setvariable variable=ENV_FILE]environment.yml'
+      echo '##vso[task.setvariable variable=AZURE]true'
+    displayName: 'Setting environment variables'
+
+  # Do not require a conda environment
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      ci/code_checks.sh patterns
+    displayName: 'Looking for unwanted patterns'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      sudo apt-get install -y libc6-dev-i386
+      ci/incremental/install_miniconda.sh
+      ci/incremental/setup_conda_environment.sh
+    displayName: 'Set up environment'
+
+  # Do not require pandas
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      ci/code_checks.sh lint
+    displayName: 'Linting'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      ci/code_checks.sh dependencies
+    displayName: 'Dependencies consistency'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      ci/incremental/build.sh
+    displayName: 'Build'
+    condition: true
+
+  # Require pandas
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      ci/code_checks.sh code
+    displayName: 'Checks on imported code'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      ci/code_checks.sh doctests
+    displayName: 'Running doctests'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      ci/code_checks.sh docstrings
+    displayName: 'Docstring validation'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      pytest --capture=no --strict scripts
+    displayName: 'Testing docstring validation script'
+    condition: true
+
+  - script: |
+      export PATH=$HOME/miniconda3/bin:$PATH
+      source activate pandas-dev
+      git remote add upstream https://github.com/pandas-dev/pandas.git
+      git fetch upstream
+      if git diff upstream/master --name-only | grep -q "^asv_bench/"; then
+          cd asv_bench
+          asv machine --yes
+          ASV_OUTPUT="$(asv dev)"
+          if [[ $(echo "$ASV_OUTPUT" | grep "failed") ]]; then
+              echo "##vso[task.logissue type=error]Benchmarks run with errors"
+              echo $ASV_OUTPUT
+              exit 1
+          else
+              echo "Benchmarks run without errors"
+          fi
+      else
+          echo "Benchmarks did not run, no changes detected"
+      fi
+    displayName: 'Running benchmarks'
+    condition: true
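The `##vso[...]` lines above are Azure Pipelines logging commands. As a minimal standalone sketch of the variable hand-off the first step relies on (illustration only, not part of this diff):

```bash
# A step that prints this logging command asks Azure Pipelines to export the
# variable to every subsequent step (each step runs in a fresh shell):
echo '##vso[task.setvariable variable=AZURE]true'

# A later script step then sees it as an ordinary environment variable,
# which is what ci/code_checks.sh below tests for:
if [[ "$AZURE" == "true" ]]; then
    echo "formatting output for Azure Pipelines"
fi
```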
"failed") ]]; then + echo "##vso[task.logissue type=error]Benchmarks run with errors" + echo $ASV_OUTPUT + exit 1 + else + echo "Benchmarks run without errors" + fi + else + echo "Benchmarks did not run, no changes detected" + fi + displayName: 'Running benchmarks' + condition: true diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 5d0356dc8be9c..a8a86eedb0549 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -5,25 +5,48 @@ # This script is intended for both the CI and to check locally that code standards are # respected. We are currently linting (PEP-8 and similar), looking for patterns of # common mistakes (sphinx directives with missing blank lines, old style classes, -# unwanted imports...), and we also run doctests here (currently some files only). -# In the future we may want to add the validation of docstrings and other checks here. +# unwanted imports...), we run doctests here (currently some files only), and we +# validate formatting error in docstrings. # # Usage: # $ ./ci/code_checks.sh # run all checks # $ ./ci/code_checks.sh lint # run linting only # $ ./ci/code_checks.sh patterns # check for patterns that should not exist +# $ ./ci/code_checks.sh code # checks on imported code # $ ./ci/code_checks.sh doctests # run doctests +# $ ./ci/code_checks.sh docstrings # validate docstring errors # $ ./ci/code_checks.sh dependencies # check that dependencies are consistent -echo "inside $0" -[[ $LINT ]] || { echo "NOT Linting. To lint use: LINT=true $0 $1"; exit 0; } -[[ -z "$1" || "$1" == "lint" || "$1" == "patterns" || "$1" == "doctests" || "$1" == "dependencies" ]] \ - || { echo "Unknown command $1. Usage: $0 [lint|patterns|doctests|dependencies]"; exit 9999; } +[[ -z "$1" || "$1" == "lint" || "$1" == "patterns" || "$1" == "code" || "$1" == "doctests" || "$1" == "docstrings" || "$1" == "dependencies" ]] || \ + { echo "Unknown command $1. Usage: $0 [lint|patterns|code|doctests|docstrings|dependencies]"; exit 9999; } BASE_DIR="$(dirname $0)/.." RET=0 CHECK=$1 +function invgrep { + # grep with inverse exist status and formatting for azure-pipelines + # + # This function works exactly as grep, but with opposite exit status: + # - 0 (success) when no patterns are found + # - 1 (fail) when the patterns are found + # + # This is useful for the CI, as we want to fail if one of the patterns + # that we want to avoid is found by grep. + if [[ "$AZURE" == "true" ]]; then + set -o pipefail + grep -n "$@" | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Found unwanted pattern: " $3}' + else + grep "$@" + fi + return $((! $?)) +} + +if [[ "$AZURE" == "true" ]]; then + FLAKE8_FORMAT="##vso[task.logissue type=error;sourcepath=%(path)s;linenumber=%(row)s;columnnumber=%(col)s;code=%(code)s;]%(text)s" +else + FLAKE8_FORMAT="default" +fi ### LINTING ### if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then @@ -35,22 +58,22 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # pandas/_libs/src is C code, so no need to search there. MSG='Linting .py code' ; echo $MSG - flake8 . + flake8 --format="$FLAKE8_FORMAT" . 
RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Linting .pyx code' ; echo $MSG - flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403,C400,C401,C402,C403,C404,C405,C406,C407,C408,C409,C410,C411 + flake8 --format="$FLAKE8_FORMAT" pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403,C400,C401,C402,C403,C404,C405,C406,C407,C408,C409,C410,C411 RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Linting .pxd and .pxi.in' ; echo $MSG - flake8 pandas/_libs --filename=*.pxi.in,*.pxd --select=E501,E302,E203,E111,E114,E221,E303,E231,E126,F403 + flake8 --format="$FLAKE8_FORMAT" pandas/_libs --filename=*.pxi.in,*.pxd --select=E501,E302,E203,E111,E114,E221,E303,E231,E126,F403 RET=$(($RET + $?)) ; echo $MSG "DONE" echo "flake8-rst --version" flake8-rst --version MSG='Linting code-blocks in .rst documentation' ; echo $MSG - flake8-rst doc/source --filename=*.rst + flake8-rst doc/source --filename=*.rst --format="$FLAKE8_FORMAT" RET=$(($RET + $?)) ; echo $MSG "DONE" # Check that cython casting is of the form `obj` as opposed to ` obj`; @@ -58,7 +81,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # Note: this grep pattern is (intended to be) equivalent to the python # regex r'(?])> ' MSG='Linting .pyx code for spacing conventions in casting' ; echo $MSG - ! grep -r -E --include '*.pyx' --include '*.pxi.in' '[a-zA-Z0-9*]> ' pandas/_libs + invgrep -r -E --include '*.pyx' --include '*.pxi.in' '[a-zA-Z0-9*]> ' pandas/_libs RET=$(($RET + $?)) ; echo $MSG "DONE" # readability/casting: Warnings about C casting instead of C++ casting @@ -88,43 +111,48 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then # Check for imports from pandas.core.common instead of `import pandas.core.common as com` MSG='Check for non-standard imports' ; echo $MSG - ! grep -R --include="*.py*" -E "from pandas.core.common import " pandas + invgrep -R --include="*.py*" -E "from pandas.core.common import " pandas RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for pytest warns' ; echo $MSG - ! grep -r -E --include '*.py' 'pytest\.warns' pandas/tests/ + invgrep -r -E --include '*.py' 'pytest\.warns' pandas/tests/ RET=$(($RET + $?)) ; echo $MSG "DONE" # Check for the following code in testing: `np.testing` and `np.array_equal` MSG='Check for invalid testing' ; echo $MSG - ! grep -r -E --include '*.py' --exclude testing.py '(numpy|np)(\.testing|\.array_equal)' pandas/tests/ + invgrep -r -E --include '*.py' --exclude testing.py '(numpy|np)(\.testing|\.array_equal)' pandas/tests/ RET=$(($RET + $?)) ; echo $MSG "DONE" # Check for the following code in the extension array base tests: `tm.assert_frame_equal` and `tm.assert_series_equal` MSG='Check for invalid EA testing' ; echo $MSG - ! grep -r -E --include '*.py' --exclude base.py 'tm.assert_(series|frame)_equal' pandas/tests/extension/base + invgrep -r -E --include '*.py' --exclude base.py 'tm.assert_(series|frame)_equal' pandas/tests/extension/base RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for deprecated messages without sphinx directive' ; echo $MSG - ! grep -R --include="*.py" --include="*.pyx" -E "(DEPRECATED|DEPRECATE|Deprecated)(:|,|\.)" pandas + invgrep -R --include="*.py" --include="*.pyx" -E "(DEPRECATED|DEPRECATE|Deprecated)(:|,|\.)" pandas RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for old-style classes' ; echo $MSG - ! 
grep -R --include="*.py" -E "class\s\S*[^)]:" pandas scripts + invgrep -R --include="*.py" -E "class\s\S*[^)]:" pandas scripts RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for backticks incorrectly rendering because of missing spaces' ; echo $MSG - ! grep -R --include="*.rst" -E "[a-zA-Z0-9]\`\`?[a-zA-Z0-9]" doc/source/ + invgrep -R --include="*.rst" -E "[a-zA-Z0-9]\`\`?[a-zA-Z0-9]" doc/source/ RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for incorrect sphinx directives' ; echo $MSG - ! grep -R --include="*.py" --include="*.pyx" --include="*.rst" -E "\.\. (autosummary|contents|currentmodule|deprecated|function|image|important|include|ipython|literalinclude|math|module|note|raw|seealso|toctree|versionadded|versionchanged|warning):[^:]" ./pandas ./doc/source + invgrep -R --include="*.py" --include="*.pyx" --include="*.rst" -E "\.\. (autosummary|contents|currentmodule|deprecated|function|image|important|include|ipython|literalinclude|math|module|note|raw|seealso|toctree|versionadded|versionchanged|warning):[^:]" ./pandas ./doc/source RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check that the deprecated `assert_raises_regex` is not used (`pytest.raises(match=pattern)` should be used instead)' ; echo $MSG - ! grep -R --exclude=*.pyc --exclude=testing.py --exclude=test_testing.py assert_raises_regex pandas + invgrep -R --exclude=*.pyc --exclude=testing.py --exclude=test_testing.py assert_raises_regex pandas RET=$(($RET + $?)) ; echo $MSG "DONE" +fi + +### CODE ### +if [[ -z "$CHECK" || "$CHECK" == "code" ]]; then + MSG='Check for modules that pandas should not import' ; echo $MSG python -c " import sys @@ -135,7 +163,7 @@ blacklist = {'bs4', 'gcsfs', 'html5lib', 'ipython', 'jinja2' 'hypothesis', 'tables', 'xlrd', 'xlsxwriter', 'xlwt'} mods = blacklist & set(m.split('.')[0] for m in sys.modules) if mods: - sys.stderr.write('pandas should not import: {}\n'.format(', '.join(mods))) + sys.stderr.write('err: pandas should not import: {}\n'.format(', '.join(mods))) sys.exit(len(mods)) " RET=$(($RET + $?)) ; echo $MSG "DONE" @@ -157,7 +185,7 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then MSG='Doctests generic.py' ; echo $MSG pytest -q --doctest-modules pandas/core/generic.py \ - -k"-_set_axis_name -_xs -describe -droplevel -groupby -interpolate -pct_change -pipe -reindex -reindex_axis -to_json -transpose -values -xs" + -k"-_set_axis_name -_xs -describe -droplevel -groupby -interpolate -pct_change -pipe -reindex -reindex_axis -to_json -transpose -values -xs -to_clipboard" RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Doctests top-level reshaping functions' ; echo $MSG @@ -178,11 +206,22 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then fi +### DOCSTRINGS ### +if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then + + MSG='Validate docstrings (GL06, SS04, PR03, PR05, EX04)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,SS04,PR03,PR05,EX04 + RET=$(($RET + $?)) ; echo $MSG "DONE" + +fi + ### DEPENDENCIES ### if [[ -z "$CHECK" || "$CHECK" == "dependencies" ]]; then + MSG='Check that requirements-dev.txt has been generated from environment.yml' ; echo $MSG - $BASE_DIR/scripts/generate_pip_deps_from_conda.py --compare + $BASE_DIR/scripts/generate_pip_deps_from_conda.py --compare --azure RET=$(($RET + $?)) ; echo $MSG "DONE" + fi exit $RET diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml index de76f5d6d763f..bfd69652730ed 100644 --- a/ci/deps/travis-36.yaml +++ b/ci/deps/travis-36.yaml @@ -7,16 +7,9 @@ dependencies: - cython>=0.28.2 - dask - 
diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml
index de76f5d6d763f..bfd69652730ed 100644
--- a/ci/deps/travis-36.yaml
+++ b/ci/deps/travis-36.yaml
@@ -7,16 +7,9 @@ dependencies:
   - cython>=0.28.2
   - dask
   - fastparquet
-  - flake8>=3.5
-  - flake8-comprehensions
-  - flake8-rst>=0.6.0
  - gcsfs
   - geopandas
   - html5lib
-  - ipython
-  - isort
-  - jinja2
-  - lxml
   - matplotlib
   - nomkl
   - numexpr
@@ -32,7 +25,6 @@ dependencies:
   - s3fs
   - scikit-learn
   - scipy
-  - seaborn
   - sqlalchemy
   - statsmodels
   - xarray
@@ -48,6 +40,5 @@ dependencies:
   - pip:
     - brotlipy
     - coverage
-    - cpplint
     - pandas-datareader
     - python-dateutil
diff --git a/environment.yml b/environment.yml
index 4daaa90247fa8..e31511e5b8afe 100644
--- a/environment.yml
+++ b/environment.yml
@@ -10,6 +10,7 @@ dependencies:
   - pytz

   # development
+  - asv
   - cython>=0.28.2
   - flake8
   - flake8-comprehensions
@@ -48,3 +49,5 @@ dependencies:
   - xlrd
   - xlsxwriter
   - xlwt
+  - pip:
+    - cpplint
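With `asv` added to the development environment above, the benchmark smoke test can be exercised locally the same way the new "Running benchmarks" Azure step does. A hedged sketch, assuming the `pandas-dev` environment is active and the working directory is the repository root:

```bash
cd asv_bench
asv machine --yes        # record machine info, accepting all defaults
ASV_OUTPUT="$(asv dev)"  # quick development run of the benchmark suite
if echo "$ASV_OUTPUT" | grep -q "failed"; then
    echo "Benchmarks run with errors"
    exit 1
fi
echo "Benchmarks run without errors"
```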
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f50be694b47c6..b9f32042924b9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1392,10 +1392,6 @@ def to_gbq(self, destination_table, project_id=None, chunksize=None,
                 If table exists, drop it, recreate it, and insert data.
             ``'append'``
                 If table exists, insert data. Create if does not exist.
-        private_key : str, optional
-            Service account private key in JSON format. Can be file path
-            or string contents. This is useful for remote server
-            authentication (eg. Jupyter/IPython notebook on remote host).
         auth_local_webserver : bool, default False
             Use the `local webserver flow`_ instead of the `console flow`_
             when getting user credentials.
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 65dfd45fcb9c2..bfa00d1352401 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -125,10 +125,10 @@ class Panel(NDFrame):
         axis=1
     minor_axis : Index or array-like
         axis=2
-    dtype : dtype, default None
-        Data type to force, otherwise infer
     copy : boolean, default False
         Copy data from inputs. Only affects DataFrame / 2d ndarray input
+    dtype : dtype, default None
+        Data type to force, otherwise infer
     """

     @property
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index 7b0a3da738436..6bcf56c306e6a 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -50,7 +50,7 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'):
     timedelta64 or numpy.array of timedelta64
         Output type returned if parsing succeeded.

-    See also
+    See Also
     --------
     DataFrame.astype : Cast argument to a specified dtype.
     to_datetime : Convert argument to datetime.
diff --git a/pandas/core/window.py b/pandas/core/window.py
index faaef4211ca8e..68a36fb2a6999 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -30,15 +30,14 @@
 _shared_docs = dict(**_shared_docs)
 _doc_template = """
+        Returns
+        -------
+        same type as input

-Returns
--------
-same type as input
-
-See Also
---------
-pandas.Series.%(name)s
-pandas.DataFrame.%(name)s
+        See Also
+        --------
+        Series.%(name)s
+        DataFrame.%(name)s
 """
@@ -1340,23 +1339,25 @@ def f(arg, *args, **kwargs):

         return self._apply(f, 'quantile', quantile=quantile,
                            **kwargs)

-    _shared_docs['cov'] = dedent("""
-        Calculate the %(name)s sample covariance.
+    _shared_docs['cov'] = """
+        Calculate the %(name)s sample covariance.

-        Parameters
-        ----------
-        other : Series, DataFrame, or ndarray, optional
-            if not supplied then will default to self and produce pairwise output
-        pairwise : bool, default None
-            If False then only matching columns between self and other will be used
-            and the output will be a DataFrame.
-            If True then all pairwise combinations will be calculated and the
-            output will be a MultiIndexed DataFrame in the case of DataFrame
-            inputs. In the case of missing elements, only complete pairwise
-            observations will be used.
-        ddof : int, default 1
-            Delta Degrees of Freedom. The divisor used in calculations
-            is ``N - ddof``, where ``N`` represents the number of elements.""")
+        Parameters
+        ----------
+        other : Series, DataFrame, or ndarray, optional
+            If not supplied then will default to self and produce pairwise
+            output.
+        pairwise : bool, default None
+            If False then only matching columns between self and other will be
+            used and the output will be a DataFrame.
+            If True then all pairwise combinations will be calculated and the
+            output will be a MultiIndexed DataFrame in the case of DataFrame
+            inputs. In the case of missing elements, only complete pairwise
+            observations will be used.
+        ddof : int, default 1
+            Delta Degrees of Freedom. The divisor used in calculations
+            is ``N - ddof``, where ``N`` represents the number of elements.
+    """

     def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
         if other is None:
@@ -2054,28 +2055,27 @@ def _constructor(self):

 _bias_template = """
-
-Parameters
-----------
-bias : bool, default False
-    Use a standard estimation bias correction
+        Parameters
+        ----------
+        bias : bool, default False
+            Use a standard estimation bias correction
 """

 _pairwise_template = """
-
-Parameters
-----------
-other : Series, DataFrame, or ndarray, optional
-    if not supplied then will default to self and produce pairwise output
-pairwise : bool, default None
-    If False then only matching columns between self and other will be used and
-    the output will be a DataFrame.
-    If True then all pairwise combinations will be calculated and the output
-    will be a MultiIndex DataFrame in the case of DataFrame inputs.
-    In the case of missing elements, only complete pairwise observations will
-    be used.
-bias : bool, default False
-    Use a standard estimation bias correction
+        Parameters
+        ----------
+        other : Series, DataFrame, or ndarray, optional
+            If not supplied then will default to self and produce pairwise
+            output.
+        pairwise : bool, default None
+            If False then only matching columns between self and other will be
+            used and the output will be a DataFrame.
+            If True then all pairwise combinations will be calculated and the
+            output will be a MultiIndex DataFrame in the case of DataFrame
+            inputs. In the case of missing elements, only complete pairwise
+            observations will be used.
+        bias : bool, default False
+            Use a standard estimation bias correction
 """
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 4d5b2fda7cd10..639b68d433ac6 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -52,10 +52,6 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
     reauth : boolean, default False
         Force Google BigQuery to re-authenticate the user. This is useful
         if multiple accounts are used.
-    private_key : str, optional
-        Service account private key in JSON format. Can be file path
-        or string contents. This is useful for remote server
-        authentication (eg. Jupyter/IPython notebook on remote host).
     auth_local_webserver : boolean, default False
         Use the `local webserver flow`_ instead of the `console flow`_
         when getting user credentials.
@@ -107,10 +103,6 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
         *New in version 0.8.0 of pandas-gbq*.

         .. versionadded:: 0.24.0
-    verbose : None, deprecated
-        Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
-        adjust verbosity instead
-        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
     private_key : str, deprecated
         Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
         parameter and
@@ -122,6 +114,10 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
         Service account private key in JSON format. Can be file path
         or string contents. This is useful for remote server
         authentication (eg. Jupyter/IPython notebook on remote host).
+    verbose : None, deprecated
+        Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
+        adjust verbosity instead
+        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.

     Returns
     -------
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 630943f4ec1bb..21c8064ebcac5 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -311,13 +311,13 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
         is to try and detect the correct precision, but if this is not desired
         then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
         milliseconds, microseconds or nanoseconds respectively.
-    lines : boolean, default False
-        Read the file as a json object per line.
+    encoding : str, default is 'utf-8'
+        The encoding to use to decode py3 bytes.

         .. versionadded:: 0.19.0

-    encoding : str, default is 'utf-8'
-        The encoding to use to decode py3 bytes.
+    lines : boolean, default False
+        Read the file as a json object per line.

         .. versionadded:: 0.19.0
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5e2da69df5f26..facadf384f770 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,6 +1,7 @@
 numpy>=1.15
 python-dateutil>=2.5.0
 pytz
+asv
 cython>=0.28.2
 flake8
 flake8-comprehensions
@@ -36,4 +37,5 @@ statsmodels
 xarray
 xlrd
 xlsxwriter
-xlwt
\ No newline at end of file
+xlwt
+cpplint
\ No newline at end of file
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 1f79b23a259dc..7b6eb1f9a32b5 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -75,7 +75,18 @@ def main(conda_fname, pip_fname, compare=False):
     with open(conda_fname) as conda_fd:
         deps = yaml.safe_load(conda_fd)['dependencies']

-    pip_content = '\n'.join(filter(None, map(conda_package_to_pip, deps)))
+    pip_deps = []
+    for dep in deps:
+        if isinstance(dep, str):
+            conda_dep = conda_package_to_pip(dep)
+            if conda_dep:
+                pip_deps.append(conda_dep)
+        elif isinstance(dep, dict) and len(dep) == 1 and 'pip' in dep:
+            pip_deps += dep['pip']
+        else:
+            raise ValueError('Unexpected dependency {}'.format(dep))
+
+    pip_content = '\n'.join(pip_deps)

     if compare:
         with open(pip_fname) as pip_fd:
@@ -92,6 +103,9 @@ def main(conda_fname, pip_fname, compare=False):
     argparser.add_argument('--compare',
                            action='store_true',
                            help='compare whether the two files are equivalent')
+    argparser.add_argument('--azure',
+                           action='store_true',
+                           help='show the output in azure-pipelines format')

     args = argparser.parse_args()
     repo_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
@@ -99,7 +113,10 @@ def main(conda_fname, pip_fname, compare=False):
                os.path.join(repo_path, 'requirements-dev.txt'),
                compare=args.compare)
     if res:
-        sys.stderr.write('`requirements-dev.txt` has to be generated with '
-                         '`{}` after `environment.yml` is modified.\n'.format(
-                             sys.argv[0]))
+        msg = ('`requirements-dev.txt` has to be generated with `{}` after '
+               '`environment.yml` is modified.\n'.format(sys.argv[0]))
+        if args.azure:
+            msg = ('##vso[task.logissue type=error;'
+                   'sourcepath=requirements-dev.txt]{}'.format(msg))
+        sys.stderr.write(msg)
     sys.exit(res)
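A hedged usage note for the script above, run from the repository root; the new `--azure` flag only switches the error message to the `##vso` logging-command format so Azure renders it as an annotation:

```bash
# As wired into ci/code_checks.sh: exits non-zero when requirements-dev.txt
# is out of sync with environment.yml.
./scripts/generate_pip_deps_from_conda.py --compare --azure

# The same check locally, with a plain error message:
./scripts/generate_pip_deps_from_conda.py --compare
```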