diff --git a/.travis.yml b/.travis.yml index 7fa6b330..b114a596 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,33 +5,30 @@ language: python matrix: fast_finish: true include: - - os: linux + - dist: trusty env: - PYTHON=2.7 PANDAS=0.17.1 - - os: linux + - dist: trusty env: - PYTHON=2.7 PANDAS=0.19.2 - - os: linux + - dist: trusty env: - PYTHON=3.5 PANDAS=0.17.1 - - os: linux + - dist: trusty env: - PYTHON=3.5 PANDAS=0.18.1 - - os: linux - env: - - PYTHON=3.5 PANDAS=0.19.2 - - os: linux + - dist: trusty env: - PYTHON=3.6 PANDAS=0.19.2 - - os: linux + - dist: trusty env: - - PYTHON=3.6 PANDAS=0.20.1 + - PYTHON=3.6 PANDAS=0.20.2 # In allow failures - - os: linux + - dist: trusty env: - PYTHON=3.6 PANDAS="MASTER" allow_failures: - - os: linux + - dist: trusty env: - PYTHON=3.6 PANDAS="MASTER" @@ -70,7 +67,7 @@ install: script: - export ENIGMA_API_KEY=$ENIGMA_API_KEY - - pytest -s --cov=pandas_datareader --cov-report xml:/tmp/cov-datareader.xml --junitxml=/tmp/datareader.xml + - pytest -s -r xX --cov=pandas_datareader --cov-report xml:/tmp/cov-datareader.xml --junitxml=/tmp/datareader.xml - flake8 --version - flake8 pandas_datareader diff --git a/docs/source/whatsnew.rst b/docs/source/whatsnew.rst index e2b33d68..af7b4330 100644 --- a/docs/source/whatsnew.rst +++ b/docs/source/whatsnew.rst @@ -18,6 +18,7 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.5.0.txt .. include:: whatsnew/v0.4.0.txt .. include:: whatsnew/v0.3.0.txt .. include:: whatsnew/v0.2.1.txt diff --git a/docs/source/whatsnew/v0.5.0.txt b/docs/source/whatsnew/v0.5.0.txt new file mode 100644 index 00000000..ef7f0212 --- /dev/null +++ b/docs/source/whatsnew/v0.5.0.txt @@ -0,0 +1,29 @@ +.. _whatsnew_050: + +v0.5.0 (July ??, 2017) +---------------------- + +This is a major release from 0.4.0. + +Highlights include: + +.. contents:: What's new in v0.5.0 + :local: + :backlinks: none + +.. _whatsnew_050.enhancements: + +Enhancements +~~~~~~~~~~~~ + +- Compat with new Yahoo API (:issue:`315`) + +.. 
_whatsnew_050.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +- web sessions are closed properly at the end of use (:issue:`355`) +- Handle commas in large price quotes (:issue:`345`) +- Test suite fixes for test_get_options_data (:issue:`352`) +- Test suite fixes for test_wdi_download (:issue:`350`) diff --git a/pandas_datareader/base.py b/pandas_datareader/base.py index 3f0faf03..869d03e8 100644 --- a/pandas_datareader/base.py +++ b/pandas_datareader/base.py @@ -53,8 +53,13 @@ def __init__(self, symbols, start=None, end=None, self.retry_count = retry_count self.pause = pause self.timeout = timeout + self.pause_multiplier = 1 self.session = _init_session(session, retry_count) + def close(self): + """ close my session """ + self.session.close() + @property def url(self): # must be overridden in subclass @@ -66,7 +71,10 @@ def params(self): def read(self): """ read data """ - return self._read_one_data(self.url, self.params) + try: + return self._read_one_data(self.url, self.params) + finally: + self.close() def _read_one_data(self, url, params): """ read one data from specified URL """ @@ -85,6 +93,10 @@ def _read_url_as_StringIO(self, url, params=None): response = self._get_response(url, params=params) text = self._sanitize_response(response) out = StringIO() + if len(text) == 0: + service = self.__class__.__name__ + raise IOError("{} request returned no data; check URL for invalid " + "inputs: {}".format(service, self.url)) if isinstance(text, compat.binary_type): out.write(bytes_to_str(text)) else: @@ -99,7 +111,7 @@ def _sanitize_response(response): """ return response.content - def _get_response(self, url, params=None): + def _get_response(self, url, params=None, headers=None): """ send raw HTTP request to get requests.Response from the specified url Parameters ---------- @@ -110,15 +122,29 @@ def _get_response(self, url, params=None): """ # initial attempt + retry + pause = self.pause for i in range(self.retry_count + 1): - response = self.session.get(url, params=params) + response = self.session.get(url, + params=params, + headers=headers) if response.status_code == requests.codes.ok: return response - time.sleep(self.pause) + + time.sleep(pause) + + # Increase time between subsequent requests, per subclass. + pause *= self.pause_multiplier + # Get a new breadcrumb if necessary, in case ours is invalidated + if isinstance(params, list) and 'crumb' in params: + params['crumb'] = self._get_crumb(self.retry_count) if params is not None and len(params) > 0: url = url + "?" + urlencode(params) raise RemoteDataError('Unable to read URL: {0}'.format(url)) + def _get_crumb(self, *args): + """ To be implemented by subclass """ + raise NotImplementedError("Subclass has not implemented method.") + def _read_lines(self, out): rs = read_csv(out, index_col=0, parse_dates=True, na_values='-')[::-1] # Yahoo! 
Finance sometimes does this awesome thing where they diff --git a/pandas_datareader/data.py b/pandas_datareader/data.py index da090872..08de52c1 100644 --- a/pandas_datareader/data.py +++ b/pandas_datareader/data.py @@ -9,7 +9,7 @@ from pandas_datareader.yahoo.daily import YahooDailyReader from pandas_datareader.yahoo.quotes import YahooQuotesReader -from pandas_datareader.yahoo.actions import YahooActionReader +from pandas_datareader.yahoo.actions import (YahooActionReader, YahooDivReader) from pandas_datareader.yahoo.components import _get_data as get_components_yahoo # noqa from pandas_datareader.yahoo.options import Options as YahooOptions from pandas_datareader.google.options import Options as GoogleOptions @@ -121,10 +121,10 @@ def DataReader(name, data_source=None, start=None, end=None, retry_count=retry_count, pause=pause, session=session).read() elif data_source == "yahoo-dividends": - return YahooDailyReader(symbols=name, start=start, end=end, - adjust_price=False, chunksize=25, - retry_count=retry_count, pause=pause, - session=session, interval='v').read() + return YahooDivReader(symbols=name, start=start, end=end, + adjust_price=False, chunksize=25, + retry_count=retry_count, pause=pause, + session=session, interval='d').read() elif data_source == "google": return GoogleDailyReader(symbols=name, start=start, end=end, diff --git a/pandas_datareader/edgar.py b/pandas_datareader/edgar.py index a897df8e..7f7dc443 100644 --- a/pandas_datareader/edgar.py +++ b/pandas_datareader/edgar.py @@ -150,6 +150,12 @@ def _fix_old_file_paths(self, path): return path def read(self): + try: + return self._read() + finally: + self.close() + + def _read(self): try: self._sec_ftp_session = FTP(_SEC_FTP, timeout=self.timeout) self._sec_ftp_session.login() diff --git a/pandas_datareader/enigma.py b/pandas_datareader/enigma.py index 657a6f91..5efbaef2 100644 --- a/pandas_datareader/enigma.py +++ b/pandas_datareader/enigma.py @@ -100,6 +100,12 @@ def extract_export_url(self, delay=10, max_attempts=10): return resp.json()[self.export_key] def read(self): + try: + return self._read() + finally: + self.close() + + def _read(self): export_gzipped_req = self._request(self.extract_export_url()) decompressed_data = self._decompress_export( export_gzipped_req.content).decode("utf-8") diff --git a/pandas_datareader/fred.py b/pandas_datareader/fred.py index 7f7bf7d0..4f3e4e9e 100644 --- a/pandas_datareader/fred.py +++ b/pandas_datareader/fred.py @@ -20,6 +20,12 @@ def url(self): return "http://research.stlouisfed.org/fred2/series/" def read(self): + try: + return self._read() + finally: + self.close() + + def _read(self): if not is_list_like(self.symbols): names = [self.symbols] else: diff --git a/pandas_datareader/tests/google/test_google.py b/pandas_datareader/tests/google/test_google.py index f7e6f2e5..47913a2c 100644 --- a/pandas_datareader/tests/google/test_google.py +++ b/pandas_datareader/tests/google/test_google.py @@ -83,13 +83,13 @@ def assert_option_result(self, df): def test_get_quote_string(self): df = web.get_quote_google('GOOG') - assert df.ix['GOOG']['last'] > 0.0 + assert df.loc['GOOG', 'last'] > 0.0 tm.assert_index_equal(df.index, pd.Index(['GOOG'])) self.assert_option_result(df) def test_get_quote_stringlist(self): df = web.get_quote_google(['GOOG', 'AMZN', 'GOOG']) - assert_series_equal(df.ix[0], df.ix[2]) + assert_series_equal(df.iloc[0], df.iloc[2]) tm.assert_index_equal(df.index, pd.Index(['GOOG', 'AMZN', 'GOOG'])) self.assert_option_result(df) @@ -97,7 +97,7 @@ def 
test_get_goog_volume(self): for locale in self.locales: with tm.set_locale(locale): df = web.get_data_google('GOOG').sort_index() - assert df.Volume.ix['JAN-02-2015'] == 1446662 + assert df.Volume.loc['JAN-02-2015'] == 1446662 def test_get_multi1(self): for locale in self.locales: @@ -130,13 +130,13 @@ def test_get_multi2(self): with tm.set_locale(locale): pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') - result = pan.Close.ix['01-18-12'] + result = pan.Close.loc['01-18-12'] assert_n_failed_equals_n_null_columns(w, result) # sanity checking assert np.issubdtype(result.dtype, np.floating) - result = pan.Open.ix['Jan-15-12':'Jan-20-12'] + result = pan.Open.loc['Jan-15-12':'Jan-20-12'] assert result.shape == (4, 3) assert_n_failed_equals_n_null_columns(w, result) @@ -158,7 +158,7 @@ def test_unicode_date(self): def test_google_reader_class(self): r = GoogleDailyReader('GOOG') df = r.read() - assert df.Volume.ix['JAN-02-2015'] == 1446662 + assert df.Volume.loc['JAN-02-2015'] == 1446662 session = requests.Session() r = GoogleDailyReader('GOOG', session=session) diff --git a/pandas_datareader/tests/google/test_options.py b/pandas_datareader/tests/google/test_options.py index 7310fb51..22dce42d 100644 --- a/pandas_datareader/tests/google/test_options.py +++ b/pandas_datareader/tests/google/test_options.py @@ -7,8 +7,6 @@ import pandas.util.testing as tm import pandas_datareader.data as web -from pandas_datareader._utils import RemoteDataError -from pandas_datareader._testing import skip_on_exception class TestGoogleOptions(object): @@ -18,7 +16,6 @@ def setup_class(cls): # GOOG has monthlies cls.goog = web.Options('GOOG', 'google') - @skip_on_exception(RemoteDataError) def test_get_options_data(self): options = self.goog.get_options_data(expiry=self.goog.expiry_dates[0]) @@ -46,7 +43,6 @@ def test_get_options_data_yearmonth(self): with pytest.raises(NotImplementedError): self.goog.get_options_data(month=1, year=2016) - @skip_on_exception(RemoteDataError) def test_expiry_dates(self): dates = self.goog.expiry_dates diff --git a/pandas_datareader/tests/io/test_jsdmx.py b/pandas_datareader/tests/io/test_jsdmx.py index 35fb3e3d..0f292378 100644 --- a/pandas_datareader/tests/io/test_jsdmx.py +++ b/pandas_datareader/tests/io/test_jsdmx.py @@ -50,7 +50,7 @@ def test_land_use(self): result = read_jsdmx(os.path.join(self.dirpath, 'jsdmx', 'land_use.json')) assert isinstance(result, pd.DataFrame) - result = result.ix['2010':'2011'] + result = result.loc['2010':'2011'] exp_col = pd.MultiIndex.from_product([ ['Japan', 'United States'], diff --git a/pandas_datareader/tests/test_data.py b/pandas_datareader/tests/test_data.py index d34f9c1e..28bf0d9c 100644 --- a/pandas_datareader/tests/test_data.py +++ b/pandas_datareader/tests/test_data.py @@ -4,6 +4,8 @@ import pandas_datareader.data as web from pandas import DataFrame +from pandas_datareader._utils import RemoteDataError +from pandas_datareader._testing import skip_on_exception from pandas_datareader.data import DataReader @@ -15,10 +17,13 @@ def test_options_source_warning(self): class TestDataReader(object): + + @skip_on_exception(RemoteDataError) def test_read_yahoo(self): gs = DataReader("GS", "yahoo") assert isinstance(gs, DataFrame) + @pytest.mark.xfail(RemoteDataError, reason="failing after #355") def test_read_yahoo_dividends(self): gs = DataReader("GS", "yahoo-dividends") assert isinstance(gs, DataFrame) diff --git a/pandas_datareader/tests/test_edgar.py b/pandas_datareader/tests/test_edgar.py index 853f937f..b911469e 
100644 --- a/pandas_datareader/tests/test_edgar.py +++ b/pandas_datareader/tests/test_edgar.py @@ -4,9 +4,6 @@ import pandas.util.testing as tm import pandas_datareader.data as web -from pandas_datareader._utils import RemoteDataError -from pandas_datareader._testing import skip_on_exception - class TestEdgarIndex(object): @@ -16,7 +13,6 @@ def setup_class(cls): # Disabling tests until re-write. pytest.skip("Disabling tests until re-write.") - @skip_on_exception(RemoteDataError) def test_get_full_index(self): ed = web.DataReader('full', 'edgar-index') assert len(ed) > 1000 @@ -25,7 +21,6 @@ def test_get_full_index(self): 'date_filed', 'filename'], dtype='object') tm.assert_index_equal(ed.columns, exp_columns) - @skip_on_exception(RemoteDataError) def test_get_nonzip_index_and_low_date(self): ed = web.DataReader('daily', 'edgar-index', '1994-06-30', '1994-07-02') @@ -38,14 +33,12 @@ def test_get_nonzip_index_and_low_date(self): 'filename'], dtype='object') tm.assert_index_equal(ed.columns, exp_columns) - @skip_on_exception(RemoteDataError) def test_get_gz_index_and_no_date(self): # TODO: Rewrite, as this test causes Travis to timeout. ed = web.DataReader('daily', 'edgar-index') assert len(ed) > 2000 - @skip_on_exception(RemoteDataError) def test_6_digit_date(self): ed = web.DataReader('daily', 'edgar-index', start='1998-05-18', end='1998-05-18') diff --git a/pandas_datareader/tests/test_enigma.py b/pandas_datareader/tests/test_enigma.py index 69f338ec..aaf0b8f9 100644 --- a/pandas_datareader/tests/test_enigma.py +++ b/pandas_datareader/tests/test_enigma.py @@ -2,31 +2,34 @@ import pytest from requests.exceptions import HTTPError - import pandas_datareader as pdr import pandas_datareader.data as web -from pandas_datareader._testing import skip_on_exception TEST_API_KEY = os.getenv('ENIGMA_API_KEY') +@pytest.mark.skipif(TEST_API_KEY is None, reason="no enigma_api_key") class TestEnigma(object): @classmethod def setup_class(cls): pytest.importorskip("lxml") - @skip_on_exception(HTTPError) def test_enigma_datareader(self): - df = web.DataReader('enigma.inspections.restaurants.fl', - 'enigma', access_key=TEST_API_KEY) - assert 'serialid' in df.columns + try: + df = web.DataReader('enigma.inspections.restaurants.fl', + 'enigma', access_key=TEST_API_KEY) + assert 'serialid' in df.columns + except HTTPError as e: + pytest.skip(e) - @skip_on_exception(HTTPError) def test_enigma_get_data_enigma(self): - df = pdr.get_data_enigma( - 'enigma.inspections.restaurants.fl', TEST_API_KEY) - assert 'serialid' in df.columns + try: + df = pdr.get_data_enigma( + 'enigma.inspections.restaurants.fl', TEST_API_KEY) + assert 'serialid' in df.columns + except HTTPError as e: + pytest.skip(e) def test_bad_key(self): with pytest.raises(HTTPError): diff --git a/pandas_datareader/tests/test_eurostat.py b/pandas_datareader/tests/test_eurostat.py index 58bff5f3..830d08a3 100644 --- a/pandas_datareader/tests/test_eurostat.py +++ b/pandas_datareader/tests/test_eurostat.py @@ -3,14 +3,11 @@ import pandas.util.testing as tm import pandas_datareader.data as web -from pandas_datareader._utils import RemoteDataError from pandas_datareader.compat import assert_raises_regex -from pandas_datareader._testing import skip_on_exception class TestEurostat(object): - @skip_on_exception(RemoteDataError) def test_get_cdh_e_fos(self): # Employed doctorate holders in non managerial and non professional # occupations by fields of science (%) @@ -35,7 +32,6 @@ def test_get_cdh_e_fos(self): expected = pd.DataFrame(values, index=exp_idx, 
columns=exp_col) tm.assert_frame_equal(df, expected) - @skip_on_exception(RemoteDataError) def test_get_sts_cobp_a(self): # Building permits - annual data (2010 = 100) df = web.DataReader('sts_cobp_a', 'eurostat', @@ -68,7 +64,6 @@ def test_get_sts_cobp_a(self): result = df[expected.name] tm.assert_series_equal(result, expected) - @skip_on_exception(RemoteDataError) def test_get_nrg_pc_202(self): # see gh-149 @@ -91,7 +86,6 @@ def test_get_nrg_pc_202(self): tm.assert_series_equal(df[name], exp) - @skip_on_exception(RemoteDataError) def test_get_prc_hicp_manr_exceeds_limit(self): # see gh-149 msg = 'Query size exceeds maximum limit' diff --git a/pandas_datareader/tests/test_fred.py b/pandas_datareader/tests/test_fred.py index 351b796c..ac3d3eb5 100644 --- a/pandas_datareader/tests/test_fred.py +++ b/pandas_datareader/tests/test_fred.py @@ -38,14 +38,14 @@ def test_fred_nan(self): start = datetime(2010, 1, 1) end = datetime(2013, 1, 27) df = web.DataReader("DFII5", "fred", start, end) - assert pd.isnull(df.ix['2010-01-01'][0]) + assert pd.isnull(df.loc['2010-01-01'][0]) @pytest.mark.skip(reason='Buggy as of 2/18/14; maybe a data revision?') def test_fred_parts(self): # pragma: no cover start = datetime(2010, 1, 1) end = datetime(2013, 1, 27) df = web.get_data_fred("CPIAUCSL", start, end) - assert df.ix['2010-05-01'][0] == 217.23 + assert df.loc['2010-05-01'][0] == 217.23 t = df.CPIAUCSL.values assert np.issubdtype(t.dtype, np.floating) @@ -57,7 +57,7 @@ def test_fred_part2(self): [684.7], [848.3], [933.3]] - result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5] + result = web.get_data_fred("A09024USA144NNBR", start="1915").iloc[:5] tm.assert_numpy_array_equal(result.values, np.array(expected)) def test_invalid_series(self): diff --git a/pandas_datareader/tests/test_nasdaq.py b/pandas_datareader/tests/test_nasdaq.py index ea7a61c9..c25ef410 100644 --- a/pandas_datareader/tests/test_nasdaq.py +++ b/pandas_datareader/tests/test_nasdaq.py @@ -1,12 +1,8 @@ import pandas_datareader.data as web -from pandas_datareader._utils import RemoteDataError -from pandas_datareader._testing import skip_on_exception - class TestNasdaqSymbols(object): - @skip_on_exception(RemoteDataError) def test_get_symbols(self): symbols = web.DataReader('symbols', 'nasdaq') assert 'IBM' in symbols.index diff --git a/pandas_datareader/tests/test_wb.py b/pandas_datareader/tests/test_wb.py index 9e5dcd33..689ddf1b 100644 --- a/pandas_datareader/tests/test_wb.py +++ b/pandas_datareader/tests/test_wb.py @@ -8,7 +8,6 @@ import pandas.util.testing as tm from pandas_datareader.wb import (search, download, get_countries, get_indicators, WorldBankReader) -from pandas_datareader._testing import skip_on_exception from pandas_datareader.compat import assert_raises_regex @@ -143,7 +142,6 @@ def test_wdi_download_error_handling(self): assert isinstance(result, pd.DataFrame) assert len(result) == 2 - @skip_on_exception(ValueError) def test_wdi_download_w_retired_indicator(self): cntry_codes = ['CA', 'MX', 'US'] @@ -160,28 +158,33 @@ def test_wdi_download_w_retired_indicator(self): inds = ['GDPPCKD'] - result = download(country=cntry_codes, indicator=inds, - start=2003, end=2004, errors='ignore') + with pytest.raises(ValueError): + result = download(country=cntry_codes, indicator=inds, + start=2003, end=2004, errors='ignore') - # If it ever gets here, it means WB unretired the indicator. - # even if they dropped it completely, it would still get caught above - # or the WB API changed somehow in a really unexpected way. 
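
A recurring mechanical change in the test files above (and in `wb.py` further down) is replacing the deprecated pandas `.ix` indexer with the explicit `.loc` (label-based) and `.iloc` (position-based) accessors. A small self-contained illustration of the substitution, using made-up values rather than live reader output:

```python
import pandas as pd

df = pd.DataFrame({'last': [820.5, 950.1]},
                  index=pd.Index(['GOOG', 'AMZN']))

# Label-based access replaces df.ix['GOOG']['last']:
assert df.loc['GOOG', 'last'] > 0.0

# Position-based access replaces df.ix[0]:
first_row = df.iloc[0]
assert first_row['last'] == 820.5

# Partial-string label slicing replaces df.ix['2010':'2011'] on a DatetimeIndex:
ts = pd.Series([1.0, 2.0, 3.0],
               index=pd.to_datetime(['2010-06-30', '2011-06-30', '2012-06-30']))
assert len(ts.loc['2010':'2011']) == 2
```
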
- if len(result) > 0: # pragma: no cover - pytest.skip("Invalid results") + # If it ever gets here, it means WB unretired the indicator. + # even if they dropped it completely, it would still + # get caught above + # or the WB API changed somehow in a really + # unexpected way. + if len(result) > 0: # pragma: no cover + pytest.skip("Invalid results") - @skip_on_exception(ValueError) def test_wdi_download_w_crash_inducing_countrycode(self): cntry_codes = ['CA', 'MX', 'US', 'XXX'] inds = ['NY.GDP.PCAP.CD'] - result = download(country=cntry_codes, indicator=inds, - start=2003, end=2004, errors='ignore') - - # If it ever gets here, it means the country code XXX got used by WB - # or the WB API changed somehow in a really unexpected way. - if len(result) > 0: # pragma: no cover - pytest.skip("Invalid results") + with pytest.raises(ValueError): + result = download(country=cntry_codes, indicator=inds, + start=2003, end=2004, errors='ignore') + + # If it ever gets here, it means the country code XXX + # got used by WB + # or the WB API changed somehow in a really + # unexpected way. + if len(result) > 0: # pragma: no cover + pytest.skip("Invalid results") def test_wdi_get_countries(self): result1 = get_countries() diff --git a/pandas_datareader/tests/yahoo/test_options.py b/pandas_datareader/tests/yahoo/test_options.py index c5f58497..ba4ff077 100644 --- a/pandas_datareader/tests/yahoo/test_options.py +++ b/pandas_datareader/tests/yahoo/test_options.py @@ -8,37 +8,70 @@ import pandas.util.testing as tm import pandas_datareader.data as web -from pandas_datareader._utils import RemoteDataError -from pandas_datareader._testing import skip_on_exception -class TestYahooOptions(object): +@pytest.yield_fixture +def aapl(): + aapl = web.Options('aapl', 'yahoo') + yield aapl + aapl.close() + + +@pytest.fixture +def month(): + + # AAPL has monthlies + today = datetime.today() + month = today.month + 1 + + if month > 12: # pragma: no cover + month = 1 + + return month + + +@pytest.fixture +def year(): + + # AAPL has monthlies + today = datetime.today() + year = today.year + month = today.month + 1 - @classmethod - def setup_class(cls): - # AAPL has monthlies - cls.aapl = web.Options('aapl', 'yahoo') - today = datetime.today() - cls.year = today.year - cls.month = today.month + 1 + if month > 12: # pragma: no cover + year = year + 1 - if cls.month > 12: # pragma: no cover - cls.month = 1 - cls.year = cls.year + 1 + return year - cls.expiry = datetime(cls.year, cls.month, 1) - cls.dirpath = tm.get_data_path() - cls.json1 = 'file://' + os.path.join( - cls.dirpath, 'yahoo_options1.json') - # see gh-22: empty table - cls.json2 = 'file://' + os.path.join( - cls.dirpath, 'yahoo_options2.json') - cls.data1 = cls.aapl._process_data(cls.aapl._parse_url(cls.json1)) +@pytest.fixture +def expiry(month, year): + return datetime(year, month, 1) - @classmethod - def teardown_class(cls): - del cls.aapl, cls.expiry + +@pytest.fixture +def json1(): + dirpath = tm.get_data_path() + json1 = 'file://' + os.path.join( + dirpath, 'yahoo_options1.json') + return json1 + + +@pytest.fixture +def json2(): + # see gh-22: empty table + dirpath = tm.get_data_path() + json2 = 'file://' + os.path.join( + dirpath, 'yahoo_options2.json') + return json2 + + +@pytest.fixture +def data1(aapl, json1): + return aapl._process_data(aapl._parse_url(json1)) + + +class TestYahooOptions(object): def assert_option_result(self, df): """ @@ -59,70 +92,61 @@ def assert_option_result(self, df): 'datetime64[ns]', 'datetime64[ns]', 'object']] 
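
The `TestYahooOptions` rewrite above replaces class-level `setup_class`/`teardown_class` state with function-scoped pytest fixtures, so each test gets its own `Options` reader and the HTTP session is closed in teardown. A stripped-down sketch of that fixture shape; the patch uses `pytest.yield_fixture`, but a plain `@pytest.fixture` with `yield` behaves the same on pytest 3 and later (network access to the Yahoo endpoint is assumed):

```python
import pytest

import pandas_datareader.data as web


@pytest.fixture
def aapl():
    # One reader per test; the session is always closed in teardown.
    reader = web.Options('aapl', 'yahoo')
    yield reader
    reader.close()


def test_expiry_dates(aapl):
    # The fixture is injected by argument name; no class-level state is shared.
    assert len(aapl._get_expiry_dates()) > 1
```
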
tm.assert_series_equal(df.dtypes, pd.Series(dtypes, index=exp_columns)) - @skip_on_exception(RemoteDataError) - def test_get_options_data(self): + def test_get_options_data(self, aapl, expiry): # see gh-6105: regression test with pytest.raises(ValueError): - self.aapl.get_options_data(month=3) + aapl.get_options_data(month=3) with pytest.raises(ValueError): - self.aapl.get_options_data(year=1992) + aapl.get_options_data(year=1992) - options = self.aapl.get_options_data(expiry=self.expiry) + options = aapl.get_options_data(expiry=expiry) self.assert_option_result(options) - @skip_on_exception(RemoteDataError) - def test_get_near_stock_price(self): - options = self.aapl.get_near_stock_price(call=True, put=True, - expiry=self.expiry) + def test_get_near_stock_price(self, aapl, expiry): + options = aapl.get_near_stock_price(call=True, put=True, + expiry=expiry) self.assert_option_result(options) def test_options_is_not_none(self): option = web.Options('aapl', 'yahoo') assert option is not None - @skip_on_exception(RemoteDataError) - def test_get_call_data(self): - calls = self.aapl.get_call_data(expiry=self.expiry) + def test_get_call_data(self, aapl, expiry): + calls = aapl.get_call_data(expiry=expiry) self.assert_option_result(calls) assert calls.index.levels[2][0] == 'call' - @skip_on_exception(RemoteDataError) - def test_get_put_data(self): - puts = self.aapl.get_put_data(expiry=self.expiry) + def test_get_put_data(self, aapl, expiry): + puts = aapl.get_put_data(expiry=expiry) self.assert_option_result(puts) assert puts.index.levels[2][1] == 'put' - @skip_on_exception(RemoteDataError) - def test_get_expiry_dates(self): - dates = self.aapl._get_expiry_dates() + def test_get_expiry_dates(self, aapl): + dates = aapl._get_expiry_dates() assert len(dates) > 1 - @skip_on_exception(RemoteDataError) - def test_get_all_data(self): - data = self.aapl.get_all_data(put=True) + def test_get_all_data(self, aapl): + data = aapl.get_all_data(put=True) assert len(data) > 1 self.assert_option_result(data) - @skip_on_exception(RemoteDataError) - def test_get_data_with_list(self): - data = self.aapl.get_call_data(expiry=self.aapl.expiry_dates) + def test_get_data_with_list(self, aapl): + data = aapl.get_call_data(expiry=aapl.expiry_dates) assert len(data) > 1 self.assert_option_result(data) - @skip_on_exception(RemoteDataError) - def test_get_all_data_calls_only(self): - data = self.aapl.get_all_data(call=True, put=False) + def test_get_all_data_calls_only(self, aapl): + data = aapl.get_all_data(call=True, put=False) assert len(data) > 1 self.assert_option_result(data) - @skip_on_exception(RemoteDataError) - def test_get_underlying_price(self): + def test_get_underlying_price(self, aapl): # see gh-7 options_object = web.Options('^spxpm', 'yahoo') quote_price = options_object.underlying_price @@ -130,52 +154,51 @@ def test_get_underlying_price(self): assert isinstance(quote_price, float) # Tests the weekend quote time format - price, quote_time = self.aapl.underlying_price, self.aapl.quote_time + price, quote_time = aapl.underlying_price, aapl.quote_time assert isinstance(price, (int, float, complex)) assert isinstance(quote_time, (datetime, pd.Timestamp)) - def test_chop(self): + def test_chop(self, aapl, data1): # gh-7625: regression test - self.aapl._chop_data(self.data1, above_below=2, - underlying_price=np.nan) - chopped = self.aapl._chop_data(self.data1, above_below=2, - underlying_price=100) + aapl._chop_data(data1, above_below=2, + underlying_price=np.nan) + chopped = aapl._chop_data(data1, 
above_below=2, + underlying_price=100) assert isinstance(chopped, pd.DataFrame) assert len(chopped) > 1 - chopped2 = self.aapl._chop_data(self.data1, above_below=2, - underlying_price=None) + chopped2 = aapl._chop_data(data1, above_below=2, + underlying_price=None) assert isinstance(chopped2, pd.DataFrame) assert len(chopped2) > 1 - def test_chop_out_of_strike_range(self): + def test_chop_out_of_strike_range(self, aapl, data1): # gh-7625: regression test - self.aapl._chop_data(self.data1, above_below=2, - underlying_price=np.nan) - chopped = self.aapl._chop_data(self.data1, above_below=2, - underlying_price=100000) + aapl._chop_data(data1, above_below=2, + underlying_price=np.nan) + chopped = aapl._chop_data(data1, above_below=2, + underlying_price=100000) assert isinstance(chopped, pd.DataFrame) assert len(chopped) > 1 - def test_sample_page_chg_float(self): + def test_sample_page_chg_float(self, data1): # Tests that numeric columns with comma's are appropriately dealt with - assert self.data1['Chg'].dtype == 'float64' + assert data1['Chg'].dtype == 'float64' - @skip_on_exception(RemoteDataError) - def test_month_year(self): + def test_month_year(self, aapl, month, year): # see gh-168 - data = self.aapl.get_call_data(month=self.month, year=self.year) + data = aapl.get_call_data(month=month, year=year) assert len(data) > 1 assert data.index.levels[0].dtype == 'float64' self.assert_option_result(data) - def test_empty_table(self): + def test_empty_table(self, aapl, json2): # see gh-22 - empty = self.aapl._process_data(self.aapl._parse_url(self.json2)) + empty = aapl._process_data(aapl._parse_url(json2)) assert len(empty) == 0 diff --git a/pandas_datareader/tests/yahoo/test_yahoo.py b/pandas_datareader/tests/yahoo/test_yahoo.py index 925b5383..c05ed514 100644 --- a/pandas_datareader/tests/yahoo/test_yahoo.py +++ b/pandas_datareader/tests/yahoo/test_yahoo.py @@ -11,6 +11,8 @@ import pandas_datareader.data as web from pandas_datareader.data import YahooDailyReader from pandas_datareader.yahoo.quotes import _yahoo_codes +from pandas_datareader._utils import RemoteDataError +from pandas_datareader._testing import skip_on_exception class TestYahoo(object): @@ -19,6 +21,7 @@ class TestYahoo(object): def setup_class(cls): pytest.importorskip("lxml") + @skip_on_exception(RemoteDataError) def test_yahoo(self): # Asserts that yahoo is minimally working start = datetime(2010, 1, 1) @@ -35,7 +38,7 @@ def test_yahoo_fails(self): def test_get_quote_series(self): df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG'])) - tm.assert_series_equal(df.ix[0], df.ix[2]) + tm.assert_series_equal(df.iloc[0], df.iloc[2]) def test_get_quote_string(self): _yahoo_codes.update({'MarketCap': 'j1'}) @@ -44,7 +47,7 @@ def test_get_quote_string(self): def test_get_quote_stringlist(self): df = web.get_quote_yahoo(['GOOG', 'AAPL', 'GOOG']) - tm.assert_series_equal(df.ix[0], df.ix[2]) + tm.assert_series_equal(df.iloc[0], df.iloc[2]) def test_get_quote_comma_name(self): _yahoo_codes.update({'name': 'n'}) @@ -87,64 +90,66 @@ def test_get_components_nasdaq_100(self): # pragma: no cover index=['@^NDX']) tm.assert_frame_equal(df, expected) + @skip_on_exception(RemoteDataError) def test_get_data_single_symbol(self): # single symbol # http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d # just test that we succeed web.get_data_yahoo('GOOG') + @skip_on_exception(RemoteDataError) def test_get_data_adjust_price(self): goog = web.get_data_yahoo('GOOG') goog_adj = web.get_data_yahoo('GOOG', adjust_price=True) 
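
Two complementary markers handle network-dependent tests in this patch: `skip_on_exception` (from `pandas_datareader._testing`) turns a transient remote failure into a skip, while `pytest.mark.xfail` records tests known to break after the Yahoo API change. A minimal sketch of how the two read together, reusing calls and values that appear in the patch:

```python
import pytest

import pandas_datareader.data as web
from pandas_datareader._utils import RemoteDataError
from pandas_datareader._testing import skip_on_exception


@skip_on_exception(RemoteDataError)
def test_single_symbol():
    # A flaky remote source becomes a skip instead of a hard failure.
    web.get_data_yahoo('GOOG')


@pytest.mark.xfail(reason="failing after #355")
def test_known_breakage():
    # Known-bad behaviour after the Yahoo API switch: recorded, not hidden.
    df = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='d')
    assert len(df) == 251
```
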
assert 'Adj Close' not in goog_adj.columns assert (goog['Open'] * goog_adj['Adj_Ratio']).equals(goog_adj['Open']) + @pytest.mark.xfail(reason="failing after #355") def test_get_data_interval(self): # daily interval data pan = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='d') - assert len(pan) == 252 + assert len(pan) == 251 # weekly interval data pan = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='w') - assert len(pan) == 53 + assert len(pan) == 52 - # montly interval data - pan = web.get_data_yahoo('XOM', '2013-01-01', + # monthly interval data + pan = web.get_data_yahoo('XOM', '2012-12-31', '2013-12-31', interval='m') assert len(pan) == 12 - # dividend data - pan = web.get_data_yahoo('XOM', '2013-01-01', - '2013-12-31', interval='v') - assert len(pan) == 4 - # test fail on invalid interval with pytest.raises(ValueError): web.get_data_yahoo('XOM', interval='NOT VALID') + @skip_on_exception(RemoteDataError) def test_get_data_multiple_symbols(self): # just test that we succeed sl = ['AAPL', 'AMZN', 'GOOG'] web.get_data_yahoo(sl, '2012') + @skip_on_exception(RemoteDataError) def test_get_data_multiple_symbols_two_dates(self): pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12') - result = pan.Close.ix['01-18-12'] - assert len(result) == 3 + result = pan.Close['01-18-12'].T + assert result.size == 3 # sanity checking - assert np.issubdtype(result.dtype, np.floating) + assert result.dtypes.all() == np.floating expected = np.array([[18.99, 28.4, 25.18], [18.58, 28.31, 25.13], [19.03, 28.16, 25.52], [18.81, 28.82, 25.87]]) - result = pan.Open.ix['Jan-15-12':'Jan-20-12'] + df = pan.Open + result = df[(df.index >= 'Jan-15-12') & (df.index <= 'Jan-20-12')] assert expected.shape == result.shape + @pytest.mark.xfail(reason="failing after #355") def test_get_date_ret_index(self): pan = web.get_data_yahoo(['GE', 'INTC', 'IBM'], '1977', '1987', ret_index=True) @@ -152,12 +157,13 @@ def test_get_date_ret_index(self): if hasattr(pan, 'Ret_Index') and hasattr(pan.Ret_Index, 'INTC'): tstamp = pan.Ret_Index.INTC.first_valid_index() - result = pan.Ret_Index.ix[tstamp]['INTC'] + result = pan.Ret_Index.loc[tstamp, 'INTC'] assert result == 1.0 # sanity checking assert np.issubdtype(pan.values.dtype, np.floating) + @pytest.mark.xfail(reason="failing after #355") def test_get_data_yahoo_actions(self): start = datetime(1990, 1, 1) end = datetime(2000, 4, 5) @@ -167,11 +173,11 @@ def test_get_data_yahoo_actions(self): assert sum(actions['action'] == 'DIVIDEND') == 20 assert sum(actions['action'] == 'SPLIT') == 1 - assert actions.ix['1995-05-11']['action'][0] == 'SPLIT' - assert actions.ix['1995-05-11']['value'][0] == 1 / 1.1 + assert actions.loc['1995-05-11', 'action'][0] == 'SPLIT' + assert actions.loc['1995-05-11', 'value'][0] == 1 / 1.1 - assert actions.ix['1993-05-10']['action'][0] == 'DIVIDEND' - assert actions.ix['1993-05-10']['value'][0] == 0.3 + assert actions.loc['1993-05-10', 'action'][0] == 'DIVIDEND' + assert actions.loc['1993-05-10', 'value'][0] == 0.3 def test_get_data_yahoo_actions_invalid_symbol(self): start = datetime(1990, 1, 1) @@ -180,6 +186,7 @@ def test_get_data_yahoo_actions_invalid_symbol(self): with pytest.raises(IOError): web.get_data_yahoo_actions('UNKNOWN TICKER', start, end) + @skip_on_exception(RemoteDataError) def test_yahoo_reader_class(self): r = YahooDailyReader('GOOG') df = r.read() @@ -191,6 +198,7 @@ def test_yahoo_reader_class(self): r = YahooDailyReader('GOOG', session=session) assert r.session is session + 
@pytest.mark.xfail(reason="failing after #355") def test_yahoo_DataReader(self): start = datetime(2010, 1, 1) end = datetime(2015, 5, 9) @@ -212,8 +220,11 @@ def test_yahoo_DataReader(self): 0.47, 0.43571, 0.43571, 0.43571, 0.43571, 0.37857, 0.37857, 0.37857]}, index=exp_idx) - tm.assert_frame_equal(result, exp) + exp.index.name = 'Date' + + tm.assert_frame_equal(result.reindex_like(exp), exp) + @skip_on_exception(RemoteDataError) def test_yahoo_DataReader_multi(self): start = datetime(2010, 1, 1) end = datetime(2015, 5, 9) diff --git a/pandas_datareader/wb.py b/pandas_datareader/wb.py index 59784a10..f425d2c7 100644 --- a/pandas_datareader/wb.py +++ b/pandas_datareader/wb.py @@ -159,6 +159,12 @@ def params(self): 'per_page': 25000, 'format': 'json'} def read(self): + try: + return self._read() + finally: + self.close() + + def _read(self): data = [] for indicator in self.symbols: # Build URL for api call @@ -321,7 +327,7 @@ def search(self, string='gdp.*capi', field='name', case=False): indicators = self.get_indicators() data = indicators[field] idx = data.str.contains(string, case=case) - out = indicators.ix[idx].dropna() + out = indicators.loc[idx].dropna() return out diff --git a/pandas_datareader/yahoo/actions.py b/pandas_datareader/yahoo/actions.py index 9e8b33ce..5965971a 100644 --- a/pandas_datareader/yahoo/actions.py +++ b/pandas_datareader/yahoo/actions.py @@ -1,61 +1,53 @@ -import csv -from pandas import to_datetime, DataFrame +from pandas import (concat, DataFrame) +from pandas_datareader.yahoo.daily import YahooDailyReader -from pandas_datareader.base import _DailyBaseReader - - -class YahooActionReader(_DailyBaseReader): +class YahooActionReader(YahooDailyReader): """ Returns DataFrame of historical corporate actions (dividends and stock splits) from symbols, over date range, start to end. All dates in the resulting DataFrame correspond with dividend and stock split ex-dates. """ + def read(self): + dividends = YahooDivReader(symbols=self.symbols, + start=self.start, + end=self.end, + retry_count=self.retry_count, + pause=self.pause, + session=self.session).read() + # Add a label column so we can combine our two DFs + if isinstance(dividends, DataFrame): + dividends["action"] = "DIVIDEND" + dividends = dividends.rename(columns={'Dividends': 'value'}) + + splits = YahooSplitReader(symbols=self.symbols, + start=self.start, + end=self.end, + retry_count=self.retry_count, + pause=self.pause, + session=self.session).read() + # Add a label column so we can combine our two DFs + if isinstance(splits, DataFrame): + splits["action"] = "SPLIT" + splits = splits.rename(columns={'Stock Splits': 'value'}) + # Converts fractional form splits (i.e. 
"2/1") into conversion + # ratios, then take the reciprocal + splits['value'] = splits.apply(lambda x: 1/eval(x['value']), axis=1) # noqa + + output = concat([dividends, splits]).sort_index(ascending=False) + + return output + + +class YahooDivReader(YahooDailyReader): + + @property + def service(self): + return 'div' + + +class YahooSplitReader(YahooDailyReader): @property - def url(self): - return 'http://ichart.finance.yahoo.com/x' - - def _get_params(self, symbols=None): - params = { - 's': self.symbols, - 'a': self.start.month - 1, - 'b': self.start.day, - 'c': self.start.year, - 'd': self.end.month - 1, - 'e': self.end.day, - 'f': self.end.year, - 'g': 'v' - } - return params - - def _read_lines(self, out): - actions_index = [] - actions_entries = [] - - for line in csv.reader(out.readlines()): - # Ignore lines that aren't dividends or splits (Yahoo - # add a bunch of irrelevant fields.) - if len(line) != 3 or line[0] not in ('DIVIDEND', 'SPLIT'): - continue - - action, date, value = line - if action == 'DIVIDEND': - actions_index.append(to_datetime(date)) - actions_entries.append({ - 'action': action, - 'value': float(value) - }) - elif action == 'SPLIT' and ':' in value: - # Convert the split ratio to a fraction. For example a - # 4:1 split expressed as a fraction is 1/4 = 0.25. - denominator, numerator = value.split(':', 1) - split_fraction = float(numerator) / float(denominator) - - actions_index.append(to_datetime(date)) - actions_entries.append({ - 'action': action, - 'value': split_fraction - }) - - return DataFrame(actions_entries, index=actions_index) + def service(self): + return 'split' diff --git a/pandas_datareader/yahoo/daily.py b/pandas_datareader/yahoo/daily.py index 9ee6cdcc..78777e73 100644 --- a/pandas_datareader/yahoo/daily.py +++ b/pandas_datareader/yahoo/daily.py @@ -1,4 +1,10 @@ -from pandas_datareader.base import _DailyBaseReader +import re +import time +import warnings +import numpy as np +from pandas import Panel +from pandas_datareader.base import (_DailyBaseReader, _in_chunks) +from pandas_datareader._utils import (RemoteDataError, SymbolWarning) class YahooDailyReader(_DailyBaseReader): @@ -39,47 +45,122 @@ class YahooDailyReader(_DailyBaseReader): """ def __init__(self, symbols=None, start=None, end=None, retry_count=3, - pause=0.001, session=None, adjust_price=False, + pause=0.35, session=None, adjust_price=False, ret_index=False, chunksize=25, interval='d'): super(YahooDailyReader, self).__init__(symbols=symbols, start=start, end=end, retry_count=retry_count, pause=pause, session=session, chunksize=chunksize) + # Ladder up the wait time between subsequent requests to improve + # probability of a successful retry + self.pause_multiplier = 2.5 + + self.headers = { + 'Connection': 'keep-alive', + 'Expires': str(-1), + 'Upgrade-Insecure-Requests': str(1), + # Google Chrome: + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36' # noqa + } + self.adjust_price = adjust_price self.ret_index = ret_index - - if interval not in ['d', 'w', 'm', 'v']: - raise ValueError("Invalid interval: valid values are " - "'d', 'w', 'm' and 'v'") self.interval = interval + if self.interval not in ['d', 'wk', 'mo', 'm', 'w']: + raise ValueError("Invalid interval: valid values are 'd', 'wk' and 'mo'. 'm' and 'w' have been implemented for " # noqa + "backward compatibility. 
'v' has been moved to the yahoo-actions or yahoo-dividends APIs.") # noqa + elif self.interval in ['m', 'mo']: + self.pdinterval = 'm' + self.interval = 'mo' + elif self.interval in ['w', 'wk']: + self.pdinterval = 'w' + self.interval = 'wk' + + self.interval = '1' + self.interval + self.crumb = self._get_crumb(retry_count) + + @property + def service(self): + return 'history' + @property def url(self): - return 'http://ichart.finance.yahoo.com/table.csv' + return 'https://query1.finance.yahoo.com/v7/finance/download/{}'\ + .format(self.symbols) + + @staticmethod + def yurl(symbol): + return 'https://query1.finance.yahoo.com/v7/finance/download/{}'\ + .format(symbol) def _get_params(self, symbol): + unix_start = int(time.mktime(self.start.timetuple())) + unix_end = int(time.mktime(self.end.timetuple())) + params = { - 's': symbol, - 'a': self.start.month - 1, - 'b': self.start.day, - 'c': self.start.year, - 'd': self.end.month - 1, - 'e': self.end.day, - 'f': self.end.year, - 'g': self.interval, - 'ignore': '.csv' + 'period1': unix_start, + 'period2': unix_end, + 'interval': self.interval, + 'events': self.service, + 'crumb': self.crumb } return params def read(self): """ read one data from specified URL """ - df = super(YahooDailyReader, self).read() - if self.ret_index: - df['Ret_Index'] = _calc_return_index(df['Adj Close']) - if self.adjust_price: - df = _adjust_prices(df) - return df + try: + df = super(YahooDailyReader, self).read() + if self.ret_index: + df['Ret_Index'] = _calc_return_index(df['Adj Close']) + if self.adjust_price: + df = _adjust_prices(df) + return df.sort_index() + finally: + self.close() + + def _dl_mult_symbols(self, symbols): + stocks = {} + failed = [] + passed = [] + for sym_group in _in_chunks(symbols, self.chunksize): + for sym in sym_group: + try: + stocks[sym] = self._read_one_data(self.yurl(sym), + self._get_params(sym)) + passed.append(sym) + except IOError: + msg = 'Failed to read symbol: {0!r}, replacing with NaN.' + warnings.warn(msg.format(sym), SymbolWarning) + failed.append(sym) + + if len(passed) == 0: + msg = "No data fetched using {0!r}" + raise RemoteDataError(msg.format(self.__class__.__name__)) + try: + if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0: + df_na = stocks[passed[0]].copy() + df_na[:] = np.nan + for sym in failed: + stocks[sym] = df_na + return Panel(stocks).swapaxes('items', 'minor') + except AttributeError: + # cannot construct a panel with just 1D nans indicating no data + msg = "No data fetched using {0!r}" + raise RemoteDataError(msg.format(self.__class__.__name__)) + + def _get_crumb(self, retries): + # Scrape a history page for a valid crumb ID: + tu = "https://finance.yahoo.com/quote/{}/history".format(self.symbols) + response = self._get_response(tu, + params=self.params, headers=self.headers) + out = str(self._sanitize_response(response)) + # Matches: {"crumb":"AlphaNumeric"} + rpat = '"CrumbStore":{"crumb":"([^"]+)"}' + + crumb = re.findall(rpat, out)[0] + return crumb.encode('ascii').decode('unicode-escape') def _adjust_prices(hist_data, price_list=None): @@ -105,15 +186,16 @@ def _calc_return_index(price_df): (typically NaN) is set to 1. """ df = price_df.pct_change().add(1).cumprod() - mask = df.ix[1].notnull() & df.ix[0].isnull() - df.ix[0][mask] = 1 + mask = df.iloc[1].notnull() & df.iloc[0].isnull() + df.loc[df.index[0], mask] = 1 # Check for first stock listings after starting date of index in ret_index # If True, find first_valid_index and set previous entry to 1. 
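
The rewritten `YahooDailyReader` above targets the `query1.finance.yahoo.com/v7/finance/download` endpoint: start and end dates become Unix timestamps, intervals map to `1d`/`1wk`/`1mo`, and every request carries a `crumb` scraped from a quote history page while the session keeps the matching cookie. A condensed sketch of how those pieces fit together, assuming the endpoint and crumb handshake behave as in the patch (neither is guaranteed to stay stable):

```python
import re
import time
from datetime import datetime

import requests

symbol = 'GOOG'
start, end = datetime(2017, 1, 1), datetime(2017, 6, 30)

session = requests.Session()

# 1. Scrape a crumb from the history page; the session holds the paired cookie.
history = session.get('https://finance.yahoo.com/quote/{}/history'.format(symbol))
crumb = re.findall(r'"CrumbStore":{"crumb":"([^"]+)"}', history.text)[0]
crumb = crumb.encode('ascii').decode('unicode-escape')

# 2. Build the v7 download request: Unix-timestamp bounds plus interval/event codes.
params = {
    'period1': int(time.mktime(start.timetuple())),
    'period2': int(time.mktime(end.timetuple())),
    'interval': '1d',       # '1wk' and '1mo' are the other accepted values
    'events': 'history',    # 'div' and 'split' back the dividends/actions readers
    'crumb': crumb,
}
url = 'https://query1.finance.yahoo.com/v7/finance/download/{}'.format(symbol)
csv_text = session.get(url, params=params).text
```
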
if (~mask).any(): for sym in mask.index[~mask]: + sym_idx = df.columns.get_loc(sym) tstamp = df[sym].first_valid_index() t_idx = df.index.get_loc(tstamp) - 1 - df[sym].ix[t_idx] = 1 + df.iloc[t_idx, sym_idx] = 1 return df diff --git a/pandas_datareader/yahoo/options.py b/pandas_datareader/yahoo/options.py index f0dfc6da..facde35e 100644 --- a/pandas_datareader/yahoo/options.py +++ b/pandas_datareader/yahoo/options.py @@ -137,7 +137,7 @@ def get_options_data(self, month=None, year=None, expiry=None): """ return concat([f(month, year, expiry) for f in (self.get_put_data, - self.get_call_data)]).sortlevel() + self.get_call_data)]).sort_index() def _option_from_url(self, url): @@ -810,17 +810,22 @@ def _load_data(self, exp_dates=None): pandas.DataFrame A DataFrame with requested options data. """ - epoch = dt.datetime.utcfromtimestamp(0) - if exp_dates is None: - exp_dates = self._get_expiry_dates() - exp_unix_times = [int((dt.datetime( - exp_date.year, exp_date.month, exp_date.day) - - epoch).total_seconds()) - for exp_date in exp_dates] data = [] - for exp_date in exp_unix_times: - url = (self._OPTIONS_BASE_URL + '?date={exp_date}').format( - sym=self.symbol, exp_date=exp_date) - jd = self._parse_url(url) - data.append(self._process_data(jd)) - return concat(data).sortlevel() + epoch = dt.datetime.utcfromtimestamp(0) + + try: + if exp_dates is None: + exp_dates = self._get_expiry_dates() + exp_unix_times = [int((dt.datetime(exp_date.year, + exp_date.month, + exp_date.day) - epoch + ).total_seconds()) + for exp_date in exp_dates] + for exp_date in exp_unix_times: + url = (self._OPTIONS_BASE_URL + '?date={exp_date}').format( + sym=self.symbol, exp_date=exp_date) + jd = self._parse_url(url) + data.append(self._process_data(jd)) + return concat(data).sort_index() + finally: + self.close() diff --git a/test.sh b/test.sh new file mode 100755 index 00000000..16bb8ef7 --- /dev/null +++ b/test.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +pytest -s -r xX pandas_datareader "$@" diff --git a/tox.ini b/tox.ini index 5edf1d32..99eda4cc 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist=py{26,27,32,33,34} +envlist=py{27,35,36} [testenv] commands= @@ -7,8 +7,3 @@ commands= deps= pytest pytest-cov - -[testenv:py26] -deps= - unittest2 - {[testenv]deps}
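
Beyond the Yahoo-specific rewrite, one pattern runs through `base.py`, `edgar.py`, `enigma.py`, `fred.py`, `wb.py` and `yahoo/options.py`: `read()` wraps the real work in `try`/`finally` and calls the new `close()` so sessions are released (:issue:`355`), and `_get_response` retries with a per-subclass `pause_multiplier` so the wait grows between attempts. A minimal sketch of that shape; the class and method names here are illustrative, and error handling plus the crumb refresh are omitted (the real base class raises `RemoteDataError`):

```python
import time

import requests


class BaseReaderSketch(object):
    """Illustrative only; mirrors the shape of the patched _BaseReader."""

    def __init__(self, retry_count=3, pause=0.35, session=None):
        self.retry_count = retry_count
        self.pause = pause
        self.pause_multiplier = 1          # subclasses raise this (Yahoo uses 2.5)
        self.session = session or requests.Session()

    def close(self):
        self.session.close()

    def read(self):
        try:
            return self._read()            # subclass-specific fetching/parsing
        finally:
            self.close()                   # always release the HTTP session

    def _read(self):
        raise NotImplementedError("subclasses fetch and parse here")

    def _get_response(self, url, params=None, headers=None):
        pause = self.pause
        for _ in range(self.retry_count + 1):
            response = self.session.get(url, params=params, headers=headers)
            if response.status_code == requests.codes.ok:
                return response
            time.sleep(pause)
            pause *= self.pause_multiplier  # back off before the next attempt
        raise IOError('Unable to read URL: {0}'.format(url))
```
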