diff --git a/pandas/io/common.py b/pandas/io/common.py
index 0159716248b11..7151a34cd37de 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -64,7 +64,7 @@ def __next__(self):
         raise AbstractMethodError(self)
 
 
-def _is_url(url) -> bool:
+def is_url(url) -> bool:
     """
     Check to see if a URL has a valid protocol.
 
@@ -102,7 +102,7 @@ def _expand_user(
     return filepath_or_buffer
 
 
-def _validate_header_arg(header) -> None:
+def validate_header_arg(header) -> None:
     if isinstance(header, bool):
         raise TypeError(
             "Passing a bool to header is invalid. "
@@ -112,7 +112,7 @@ def _validate_header_arg(header) -> None:
         )
 
 
-def _stringify_path(
+def stringify_path(
     filepath_or_buffer: FilePathOrBuffer[AnyStr],
 ) -> FilePathOrBuffer[AnyStr]:
     """Attempt to convert a path-like object to a string.
@@ -193,9 +193,9 @@ def get_filepath_or_buffer(
               compression, str,
              should_close, bool)
    """
-    filepath_or_buffer = _stringify_path(filepath_or_buffer)
+    filepath_or_buffer = stringify_path(filepath_or_buffer)
 
-    if isinstance(filepath_or_buffer, str) and _is_url(filepath_or_buffer):
+    if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
         req = urlopen(filepath_or_buffer)
         content_encoding = req.headers.get("Content-Encoding", None)
         if content_encoding == "gzip":
@@ -250,7 +250,7 @@ def file_path_to_url(path: str) -> str:
 _compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
 
 
-def _get_compression_method(
+def get_compression_method(
     compression: Optional[Union[str, Mapping[str, str]]]
 ) -> Tuple[Optional[str], Dict[str, str]]:
     """
@@ -283,7 +283,7 @@
     return compression, compression_args
 
 
-def _infer_compression(
+def infer_compression(
     filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
 ) -> Optional[str]:
     """
@@ -317,7 +317,7 @@
     # Infer compression
     if compression == "infer":
         # Convert all path types (e.g. pathlib.Path) to strings
-        filepath_or_buffer = _stringify_path(filepath_or_buffer)
+        filepath_or_buffer = stringify_path(filepath_or_buffer)
         if not isinstance(filepath_or_buffer, str):
             # Cannot infer compression of a buffer, assume no compression
             return None
@@ -338,7 +338,7 @@
     raise ValueError(msg)
 
 
-def _get_handle(
+def get_handle(
     path_or_buf,
     mode: str,
     encoding=None,
@@ -396,12 +396,12 @@ def _get_handle(
         f = path_or_buf
 
     # Convert pathlib.Path/py.path.local or string
-    path_or_buf = _stringify_path(path_or_buf)
+    path_or_buf = stringify_path(path_or_buf)
     is_path = isinstance(path_or_buf, str)
 
-    compression, compression_args = _get_compression_method(compression)
+    compression, compression_args = get_compression_method(compression)
     if is_path:
-        compression = _infer_compression(path_or_buf, compression)
+        compression = infer_compression(path_or_buf, compression)
 
     if compression:
 
@@ -421,7 +421,7 @@ def _get_handle(
 
         # ZIP Compression
         elif compression == "zip":
-            zf = BytesZipFile(path_or_buf, mode, **compression_args)
+            zf = _BytesZipFile(path_or_buf, mode, **compression_args)
             # Ensure the container is closed as well.
             handles.append(zf)
             if zf.mode == "w":
@@ -472,7 +472,7 @@ def _get_handle(
 
     if memory_map and hasattr(f, "fileno"):
         try:
-            wrapped = MMapWrapper(f)
+            wrapped = _MMapWrapper(f)
             f.close()
             f = wrapped
         except Exception:
@@ -485,7 +485,7 @@ def _get_handle(
     return f, handles
 
 
-class BytesZipFile(zipfile.ZipFile, BytesIO):  # type: ignore
+class _BytesZipFile(zipfile.ZipFile, BytesIO):  # type: ignore
     """
     Wrapper for standard library class ZipFile and allow the returned file-like
     handle to accept byte strings via `write` method.
@@ -518,7 +518,7 @@ def closed(self):
         return self.fp is None
 
 
-class MMapWrapper(BaseIterator):
+class _MMapWrapper(BaseIterator):
     """
     Wrapper for the Python's mmap class so that it can be properly read in
     by Python's csv.reader class.
@@ -537,7 +537,7 @@ def __init__(self, f: IO):
     def __getattr__(self, name: str):
         return getattr(self.mmap, name)
 
-    def __iter__(self) -> "MMapWrapper":
+    def __iter__(self) -> "_MMapWrapper":
         return self
 
     def __next__(self) -> str:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 8368142c3633a..553334407d12e 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -15,11 +15,11 @@
 from pandas.core.frame import DataFrame
 
 from pandas.io.common import (
-    _is_url,
-    _stringify_path,
-    _validate_header_arg,
     get_filepath_or_buffer,
+    is_url,
+    stringify_path,
     urlopen,
+    validate_header_arg,
 )
 from pandas.io.excel._util import (
     _fill_mi_header,
@@ -339,7 +339,7 @@ def read_excel(
 class _BaseExcelReader(metaclass=abc.ABCMeta):
     def __init__(self, filepath_or_buffer):
         # If filepath_or_buffer is a url, load the data into a BytesIO
-        if _is_url(filepath_or_buffer):
+        if is_url(filepath_or_buffer):
             filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read())
         elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
             filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer)
@@ -408,7 +408,7 @@ def parse(
         **kwds,
     ):
 
-        _validate_header_arg(header)
+        validate_header_arg(header)
 
         ret_dict = False
@@ -708,7 +708,7 @@ def __init__(
         self.mode = mode
 
     def __fspath__(self):
-        return _stringify_path(self.path)
+        return stringify_path(self.path)
 
     def _get_sheet_name(self, sheet_name):
         if sheet_name is None:
@@ -808,7 +808,7 @@ def __init__(self, io, engine=None):
         # could be a str, ExcelFile, Book, etc.
         self.io = io
         # Always a string
-        self._io = _stringify_path(io)
+        self._io = stringify_path(io)
 
         self._reader = self._engines[engine](self._io)
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 41bdf97c1fe1f..eb05004d9137c 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -4,7 +4,7 @@
 
 from pandas import DataFrame, Int64Index, RangeIndex
 
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
 
 
 def to_feather(df: DataFrame, path):
@@ -20,7 +20,7 @@ def to_feather(df: DataFrame, path):
     import_optional_dependency("pyarrow")
     from pyarrow import feather
 
-    path = _stringify_path(path)
+    path = stringify_path(path)
 
     if not isinstance(df, DataFrame):
         raise ValueError("feather only support IO with DataFrames")
@@ -98,6 +98,6 @@ def read_feather(path, columns=None, use_threads: bool = True):
     import_optional_dependency("pyarrow")
     from pyarrow import feather
 
-    path = _stringify_path(path)
+    path = stringify_path(path)
 
     return feather.read_feather(path, columns=columns, use_threads=bool(use_threads))
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index ae5d1d30bcddb..c0071028a8ef4 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -23,10 +23,10 @@
 
 from pandas.io.common import (
     UnicodeWriter,
-    _get_compression_method,
-    _get_handle,
-    _infer_compression,
+    get_compression_method,
     get_filepath_or_buffer,
+    get_handle,
+    infer_compression,
 )
@@ -61,7 +61,7 @@ def __init__(
             path_or_buf = StringIO()
 
         # Extract compression mode as given, if dict
-        compression, self.compression_args = _get_compression_method(compression)
+        compression, self.compression_args = get_compression_method(compression)
 
         self.path_or_buf, _, _, _ = get_filepath_or_buffer(
             path_or_buf, encoding=encoding, compression=compression, mode=mode
@@ -78,7 +78,7 @@ def __init__(
         if encoding is None:
             encoding = "utf-8"
         self.encoding = encoding
-        self.compression = _infer_compression(self.path_or_buf, compression)
+        self.compression = infer_compression(self.path_or_buf, compression)
 
         if quoting is None:
             quoting = csvlib.QUOTE_MINIMAL
@@ -179,7 +179,7 @@ def save(self):
             f = self.path_or_buf
             close = False
         else:
-            f, handles = _get_handle(
+            f, handles = get_handle(
                 self.path_or_buf,
                 self.mode,
                 encoding=self.encoding,
@@ -212,7 +212,7 @@ def save(self):
             else:
                 compression = dict(self.compression_args, method=self.compression)
 
-                f, handles = _get_handle(
+                f, handles = get_handle(
                     self.path_or_buf,
                     self.mode,
                     encoding=self.encoding,
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 2f7a80eea1554..18340bc702378 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -15,6 +15,7 @@
 from pandas import Index
 import pandas.core.common as com
 
+from pandas.io.common import stringify_path
 from pandas.io.formats.css import CSSResolver, CSSWarning
 from pandas.io.formats.format import get_level_lengths
 from pandas.io.formats.printing import pprint_thing
@@ -711,7 +712,6 @@ def write(
         and ``io.excel.xlsm.writer``.
""" from pandas.io.excel import ExcelWriter - from pandas.io.common import _stringify_path num_rows, num_cols = self.df.shape if num_rows > self.max_rows or num_cols > self.max_cols: @@ -724,7 +724,7 @@ def write( if isinstance(writer, ExcelWriter): need_save = False else: - writer = ExcelWriter(_stringify_path(writer), engine=engine) + writer = ExcelWriter(stringify_path(writer), engine=engine) need_save = True formatted_cells = self.get_formatted_cells() diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 124bd31c8d308..b0574925cf1b1 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -72,7 +72,7 @@ from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex -from pandas.io.common import _stringify_path +from pandas.io.common import stringify_path from pandas.io.formats.printing import adjoin, justify, pprint_thing if TYPE_CHECKING: @@ -482,7 +482,7 @@ def get_buffer( objects, otherwise yield buf unchanged. """ if buf is not None: - buf = _stringify_path(buf) + buf = stringify_path(buf) else: buf = StringIO() diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 0c6b0c1a5810b..3a3347a5c86ea 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -12,7 +12,7 @@ from pandas import option_context -from pandas.io.common import _is_url +from pandas.io.common import is_url from pandas.io.formats.format import ( DataFrameFormatter, TableFormatter, @@ -147,7 +147,7 @@ def _write_cell( rs = pprint_thing(s, escape_chars=esc).strip() - if self.render_links and _is_url(rs): + if self.render_links and is_url(rs): rs_unescaped = pprint_thing(s, escape_chars={}).strip() start_tag += ''.format(url=rs_unescaped) end_a = "" diff --git a/pandas/io/html.py b/pandas/io/html.py index 3521bad375aa6..eafcca0e85bb3 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -16,7 +16,7 @@ from pandas.core.construction import create_series_with_explicit_dtype -from pandas.io.common import _is_url, _validate_header_arg, urlopen +from pandas.io.common import is_url, urlopen, validate_header_arg from pandas.io.formats.printing import pprint_thing from pandas.io.parsers import TextParser @@ -117,7 +117,7 @@ def _read(obj): ------- raw_text : str """ - if _is_url(obj): + if is_url(obj): with urlopen(obj) as url: text = url.read() elif hasattr(obj, "read"): @@ -705,7 +705,7 @@ def _build_doc(self): parser = HTMLParser(recover=True, encoding=self.encoding) try: - if _is_url(self.io): + if is_url(self.io): with urlopen(self.io) as f: r = parse(f, parser=parser) else: @@ -717,7 +717,7 @@ def _build_doc(self): pass except (UnicodeDecodeError, IOError) as e: # if the input is a blob of html goop - if not _is_url(self.io): + if not is_url(self.io): r = fromstring(self.io, parser=parser) try: @@ -1076,7 +1076,7 @@ def read_html( "cannot skip rows starting from the end of the " "data (you passed a negative value)" ) - _validate_header_arg(header) + validate_header_arg(header) return _parse( flavor=flavor, io=io, diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 6cb811bb97755..14a272e15bc29 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -20,10 +20,10 @@ from pandas.io.common import ( BaseIterator, - _get_handle, - _infer_compression, - _stringify_path, get_filepath_or_buffer, + get_handle, + infer_compression, + stringify_path, ) from pandas.io.formats.printing import pprint_thing from pandas.io.parsers import _validate_integer @@ -58,7 +58,7 @@ def 
             "'index=False' is only valid when 'orient' is "
             "'split' or 'table'"
         )
 
-    path_or_buf = _stringify_path(path_or_buf)
+    path_or_buf = stringify_path(path_or_buf)
     if lines and orient != "records":
         raise ValueError("'lines' keyword only valid when 'orient' is records")
@@ -91,7 +91,7 @@ def to_json(
         s = convert_to_line_delimits(s)
 
     if isinstance(path_or_buf, str):
-        fh, handles = _get_handle(path_or_buf, "w", compression=compression)
+        fh, handles = get_handle(path_or_buf, "w", compression=compression)
         try:
             fh.write(s)
         finally:
@@ -584,7 +584,7 @@ def read_json(
     if encoding is None:
         encoding = "utf-8"
 
-    compression = _infer_compression(path_or_buf, compression)
+    compression = infer_compression(path_or_buf, compression)
     filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
         path_or_buf, encoding=encoding, compression=compression
     )
@@ -704,7 +704,7 @@ def _get_data_from_filepath(self, filepath_or_buffer):
                 pass
 
         if exists or self.compression is not None:
-            data, _ = _get_handle(
+            data, _ = get_handle(
                 filepath_or_buffer,
                 "r",
                 encoding=self.encoding,
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 32d812637a067..cc3d2bd12ca35 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -35,6 +35,7 @@
     is_categorical_dtype,
     is_dtype_equal,
     is_extension_array_dtype,
+    is_file_like,
     is_float,
     is_integer,
     is_integer_dtype,
@@ -64,11 +65,10 @@
     BaseIterator,
     UnicodeReader,
     UTF8Recoder,
-    _get_handle,
-    _infer_compression,
-    _validate_header_arg,
     get_filepath_or_buffer,
-    is_file_like,
+    get_handle,
+    infer_compression,
+    validate_header_arg,
 )
 
 from pandas.io.date_converters import generic_parser
@@ -426,7 +426,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
         kwds["encoding"] = encoding
 
     compression = kwds.get("compression", "infer")
-    compression = _infer_compression(filepath_or_buffer, compression)
+    compression = infer_compression(filepath_or_buffer, compression)
 
     # TODO: get_filepath_or_buffer could return
    # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
@@ -1050,7 +1050,7 @@ def _clean_options(self, options, engine):
         na_values = options["na_values"]
         skiprows = options["skiprows"]
 
-        _validate_header_arg(options["header"])
+        validate_header_arg(options["header"])
 
         depr_warning = ""
@@ -2283,7 +2283,7 @@ def __init__(self, f, **kwds):
         self.comment = kwds["comment"]
         self._comment_lines = []
 
-        f, handles = _get_handle(
+        f, handles = get_handle(
             f,
             "r",
             encoding=self.encoding,
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 0a0ccedd78f00..6ce52da21b4e8 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -4,7 +4,7 @@
 
 from pandas.compat import pickle_compat as pc
 
-from pandas.io.common import _get_handle, _stringify_path
+from pandas.io.common import get_handle, stringify_path
 
 
 def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
@@ -63,8 +63,8 @@ def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
     >>> import os
     >>> os.remove("./dummy.pkl")
     """
-    path = _stringify_path(path)
-    f, fh = _get_handle(path, "wb", compression=compression, is_text=False)
+    path = stringify_path(path)
+    f, fh = get_handle(path, "wb", compression=compression, is_text=False)
     if protocol < 0:
         protocol = pickle.HIGHEST_PROTOCOL
     try:
@@ -134,8 +134,8 @@ def read_pickle(path, compression="infer"):
     >>> import os
     >>> os.remove("./dummy.pkl")
     """
-    path = _stringify_path(path)
-    f, fh = _get_handle(path, "rb", compression=compression, is_text=False)
+    path = stringify_path(path)
+    f, fh = get_handle(path, "rb", compression=compression, is_text=False)
 
     # 1) try standard library Pickle
     # 2) try pickle_compat (older pandas version) to handle subclass changes
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d14b4ecf070a7..8e0ab27c1fa85 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -63,7 +63,7 @@
 from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
 from pandas.core.indexes.api import ensure_index
 
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
 from pandas.io.formats.printing import adjoin, pprint_thing
 
 if TYPE_CHECKING:
@@ -274,7 +274,7 @@ def to_hdf(
             encoding=encoding,
         )
 
-    path_or_buf = _stringify_path(path_or_buf)
+    path_or_buf = stringify_path(path_or_buf)
     if isinstance(path_or_buf, str):
         with HDFStore(
             path_or_buf, mode=mode, complevel=complevel, complib=complib
@@ -379,7 +379,7 @@ def read_hdf(
         store = path_or_buf
         auto_close = False
     else:
-        path_or_buf = _stringify_path(path_or_buf)
+        path_or_buf = stringify_path(path_or_buf)
         if not isinstance(path_or_buf, str):
             raise NotImplementedError(
                 "Support for generic buffers has not been implemented."
@@ -525,7 +525,7 @@ def __init__(
         if complib is None and complevel is not None:
             complib = tables.filters.default_complib
 
-        self._path = _stringify_path(path)
+        self._path = stringify_path(path)
         if mode is None:
             mode = "a"
         self._mode = mode
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 6bd3532d538c7..56ebb583bc2f9 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -1,7 +1,7 @@
 """
 Read SAS sas7bdat or xport files.
 """
-from pandas.io.common import _stringify_path
+from pandas.io.common import stringify_path
 
 
 def read_sas(
@@ -52,7 +52,7 @@ def read_sas(
             "than a string name, you must specify "
             "a format string"
         )
-        filepath_or_buffer = _stringify_path(filepath_or_buffer)
+        filepath_or_buffer = stringify_path(filepath_or_buffer)
         if not isinstance(filepath_or_buffer, str):
             raise ValueError(buffer_error_msg)
         fname = filepath_or_buffer.lower()
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index dbe64e4c0f06d..84dd302fc293f 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -44,7 +44,7 @@
 from pandas.core.frame import DataFrame
 from pandas.core.series import Series
 
-from pandas.io.common import BaseIterator, _stringify_path, get_filepath_or_buffer
+from pandas.io.common import BaseIterator, get_filepath_or_buffer, stringify_path
 
 _version_error = (
     "Version of given Stata file is not 104, 105, 108, "
@@ -1051,7 +1051,7 @@ def __init__(
         self._lines_read = 0
 
         self._native_byteorder = _set_endianness(sys.byteorder)
-        path_or_buf = _stringify_path(path_or_buf)
+        path_or_buf = stringify_path(path_or_buf)
         if isinstance(path_or_buf, str):
             path_or_buf, encoding, _, should_close = get_filepath_or_buffer(path_or_buf)
 
@@ -2112,7 +2112,7 @@ def __init__(
         if byteorder is None:
             byteorder = sys.byteorder
         self._byteorder = _set_endianness(byteorder)
-        self._fname = _stringify_path(fname)
+        self._fname = stringify_path(fname)
         self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
         self._converted_names = {}
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py
index ad058faff96e7..5c39dcc1a7659 100644
--- a/pandas/tests/frame/test_to_csv.py
+++ b/pandas/tests/frame/test_to_csv.py
@@ -21,7 +21,7 @@
 import pandas.core.common as com
 import pandas.util.testing as tm
 
-from pandas.io.common import _get_handle
+from pandas.io.common import get_handle
 
 MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
 
 MIXED_INT_DTYPES = [
@@ -1065,7 +1065,7 @@ def test_to_csv_compression(self, df, encoding, compression):
             tm.assert_frame_equal(df, result)
 
             # test the round trip using file handle - to_csv -> read_csv
-            f, _handles = _get_handle(
+            f, _handles = get_handle(
                 filename, "w", compression=compression, encoding=encoding
             )
             with f:
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index d2633ea0676cd..f4efbbeda6311 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -68,9 +68,9 @@ def test_expand_user_normal_path(self):
         assert os.path.expanduser(filename) == expanded_name
 
     def test_stringify_path_pathlib(self):
-        rel_path = icom._stringify_path(Path("."))
+        rel_path = icom.stringify_path(Path("."))
         assert rel_path == "."
-        redundant_path = icom._stringify_path(Path("foo//bar"))
+        redundant_path = icom.stringify_path(Path("foo//bar"))
         assert redundant_path == os.path.join("foo", "bar")
 
     @td.skip_if_no("py.path")
@@ -78,11 +78,11 @@ def test_stringify_path_localpath(self):
         path = os.path.join("foo", "bar")
         abs_path = os.path.abspath(path)
         lpath = LocalPath(path)
-        assert icom._stringify_path(lpath) == abs_path
+        assert icom.stringify_path(lpath) == abs_path
 
     def test_stringify_path_fspath(self):
         p = CustomFSPath("foo/bar.csv")
-        result = icom._stringify_path(p)
+        result = icom.stringify_path(p)
         assert result == "foo/bar.csv"
 
     @pytest.mark.parametrize(
@@ -92,7 +92,7 @@ def test_stringify_path_fspath(self):
     @pytest.mark.parametrize("path_type", path_types)
     def test_infer_compression_from_path(self, extension, expected, path_type):
         path = path_type("foo/bar.csv" + extension)
-        compression = icom._infer_compression(path, compression="infer")
+        compression = icom.infer_compression(path, compression="infer")
         assert compression == expected
 
     def test_get_filepath_or_buffer_with_path(self):
@@ -313,18 +313,18 @@ def test_constructor_bad_file(self, mmap_file):
             err = mmap.error
 
         with pytest.raises(err, match=msg):
-            icom.MMapWrapper(non_file)
+            icom._MMapWrapper(non_file)
 
         target = open(mmap_file, "r")
         target.close()
 
         msg = "I/O operation on closed file"
         with pytest.raises(ValueError, match=msg):
-            icom.MMapWrapper(target)
+            icom._MMapWrapper(target)
 
     def test_get_attr(self, mmap_file):
         with open(mmap_file, "r") as target:
-            wrapper = icom.MMapWrapper(target)
+            wrapper = icom._MMapWrapper(target)
 
         attrs = dir(wrapper.mmap)
         attrs = [attr for attr in attrs if not attr.startswith("__")]
@@ -337,7 +337,7 @@ def test_get_attr(self, mmap_file):
 
     def test_next(self, mmap_file):
         with open(mmap_file, "r") as target:
-            wrapper = icom.MMapWrapper(target)
+            wrapper = icom._MMapWrapper(target)
             lines = target.readlines()
 
         for line in lines:
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 54eb2d78fb64f..e17a32cbc8b68 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -44,14 +44,14 @@ def test_compression_size(obj, method, compression_only):
 @pytest.mark.parametrize("method", ["to_csv", "to_json"])
 def test_compression_size_fh(obj, method, compression_only):
     with tm.ensure_clean() as path:
-        f, handles = icom._get_handle(path, "w", compression=compression_only)
+        f, handles = icom.get_handle(path, "w", compression=compression_only)
         with f:
             getattr(obj, method)(f)
             assert not f.closed
         assert f.closed
         compressed_size = os.path.getsize(path)
     with tm.ensure_clean() as path:
-        f, handles = icom._get_handle(path, "w", compression=None)
+        f, handles = icom.get_handle(path, "w", compression=None)
         with f:
             getattr(obj, method)(f)
             assert not f.closed
@@ -108,7 +108,7 @@ def test_compression_warning(compression_only):
         columns=["X", "Y", "Z"],
     )
     with tm.ensure_clean() as path:
-        f, handles = icom._get_handle(path, "w", compression=compression_only)
+        f, handles = icom.get_handle(path, "w", compression=compression_only)
         with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
             with f:
                 df.to_csv(f, compression=compression_only)
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 9041d582b19ca..b48c79000c98d 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -9,7 +9,7 @@
 from pandas import DataFrame, Series
 import pandas.util.testing as tm
 
-from pandas.io.common import _get_handle
+from pandas.io.common import get_handle
 
 
 class TestSeriesToCSV:
@@ -143,7 +143,7 @@ def test_to_csv_compression(self, s, encoding, compression):
             tm.assert_series_equal(s, result)
 
             # test the round trip using file handle - to_csv -> read_csv
-            f, _handles = _get_handle(
+            f, _handles = get_handle(
                 filename, "w", compression=compression, encoding=encoding
            )
             with f:
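
For reviewers, a quick sketch of how the renamed helpers compose after this patch. This is illustrative only and not part of the diff: the sample file name and CSV payload are invented, and each public name is assumed to behave exactly like its old underscore-prefixed counterpart (a pure rename, per the hunks above).

    # Sketch only: exercises the io.common helpers made public in this diff.
    # "frame.csv.gz" is a hypothetical path; every function shown maps 1:1
    # onto its former private name (e.g. get_handle <- _get_handle).
    from pathlib import Path

    from pandas.io.common import (
        get_compression_method,
        get_handle,
        infer_compression,
        is_url,
        stringify_path,
    )

    # Path-like objects collapse to plain strings.
    path = stringify_path(Path("frame.csv.gz"))
    assert path == "frame.csv.gz"
    assert not is_url(path)  # no URL scheme, so treated as a local path

    # "infer" resolves the codec from the file extension.
    assert infer_compression(path, compression="infer") == "gzip"

    # Dict-style compression splits into the method plus extra arguments.
    compression, compression_args = get_compression_method({"method": "gzip"})
    assert compression == "gzip" and compression_args == {}

    # get_handle opens the (possibly wrapped) stream and also returns any
    # extra handles it created so the caller can close them afterwards,
    # which is the same pattern the test changes above rely on.
    f, handles = get_handle(path, "w", compression=compression)
    with f:
        f.write("a,b\n1,2\n")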