diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 425b1da33dbb9..c519baa4c21da 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -316,33 +316,36 @@ def read_excel(
             "an ExcelFile - ExcelFile already has the engine set"
         )
 
-    data = io.parse(
-        sheet_name=sheet_name,
-        header=header,
-        names=names,
-        index_col=index_col,
-        usecols=usecols,
-        squeeze=squeeze,
-        dtype=dtype,
-        converters=converters,
-        true_values=true_values,
-        false_values=false_values,
-        skiprows=skiprows,
-        nrows=nrows,
-        na_values=na_values,
-        keep_default_na=keep_default_na,
-        na_filter=na_filter,
-        verbose=verbose,
-        parse_dates=parse_dates,
-        date_parser=date_parser,
-        thousands=thousands,
-        comment=comment,
-        skipfooter=skipfooter,
-        convert_float=convert_float,
-        mangle_dupe_cols=mangle_dupe_cols,
-    )
-    if should_close:
-        io.close()
+    try:
+        data = io.parse(
+            sheet_name=sheet_name,
+            header=header,
+            names=names,
+            index_col=index_col,
+            usecols=usecols,
+            squeeze=squeeze,
+            dtype=dtype,
+            converters=converters,
+            true_values=true_values,
+            false_values=false_values,
+            skiprows=skiprows,
+            nrows=nrows,
+            na_values=na_values,
+            keep_default_na=keep_default_na,
+            na_filter=na_filter,
+            verbose=verbose,
+            parse_dates=parse_dates,
+            date_parser=date_parser,
+            thousands=thousands,
+            comment=comment,
+            skipfooter=skipfooter,
+            convert_float=convert_float,
+            mangle_dupe_cols=mangle_dupe_cols,
+        )
+    finally:
+        # make sure to close opened file handles
+        if should_close:
+            io.close()
 
     return data
 
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index c6179f5c034c7..fe471c6f6f9ac 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -818,6 +818,7 @@ def write(
                 f"Max sheet size is: {self.max_rows}, {self.max_cols}"
             )
 
+        formatted_cells = self.get_formatted_cells()
         if isinstance(writer, ExcelWriter):
             need_save = False
         else:
@@ -829,13 +830,15 @@ def write(
             )
             need_save = True
 
-        formatted_cells = self.get_formatted_cells()
-        writer.write_cells(
-            formatted_cells,
-            sheet_name,
-            startrow=startrow,
-            startcol=startcol,
-            freeze_panes=freeze_panes,
-        )
-        if need_save:
-            writer.save()
+        try:
+            writer.write_cells(
+                formatted_cells,
+                sheet_name,
+                startrow=startrow,
+                startcol=startcol,
+                freeze_panes=freeze_panes,
+            )
+        finally:
+            # make sure to close opened file handles
+            if need_save:
+                writer.close()
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index f30007f6ed907..1f62b6a8096a8 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -631,6 +631,7 @@ def _preprocess_data(self, data):
         """
         if hasattr(data, "read") and (not self.chunksize or not self.nrows):
             data = data.read()
+            self.close()
         if not hasattr(data, "read") and (self.chunksize or self.nrows):
             data = StringIO(data)
 
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index ad5385cd659ef..8d9787a9c8c9e 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2066,6 +2066,7 @@ def read(self, nrows=None):
                 return index, columns, col_dict
 
             else:
+                self.close()
                 raise
 
         # Done with first read, next time raise StopIteration
@@ -2449,6 +2450,7 @@ def read(self, rows=None):
             if self._first_chunk:
                 content = []
             else:
+                self.close()
                 raise
 
         # done with first read, next time raise StopIteration
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index e9c1bf26f6675..7c2b801ee0ea8 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -203,7 +203,6 @@ def _get_properties(self):
         self._path_or_buf.seek(0)
         self._cached_page = self._path_or_buf.read(288)
         if self._cached_page[0 : len(const.magic)] != const.magic:
-            self.close()
             raise ValueError("magic number mismatch (not a SAS file?)")
 
         # Get alignment information
@@ -279,7 +278,6 @@ def _get_properties(self):
         buf = self._path_or_buf.read(self.header_length - 288)
         self._cached_page += buf
         if len(self._cached_page) != self.header_length:
-            self.close()
             raise ValueError("The SAS7BDAT file appears to be truncated.")
 
         self._page_length = self._read_int(
@@ -333,6 +331,7 @@ def _get_properties(self):
     def __next__(self):
         da = self.read(nrows=self.chunksize or 1)
         if da is None:
+            self.close()
             raise StopIteration
         return da
 
@@ -377,7 +376,6 @@ def _parse_metadata(self):
             if len(self._cached_page) <= 0:
                 break
             if len(self._cached_page) != self._page_length:
-                self.close()
                 raise ValueError("Failed to read a meta data page from the SAS file.")
             done = self._process_page_meta()
 
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 2f5de16a7ad6c..2ecfbed8cc83f 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -276,14 +276,12 @@ def _read_header(self):
         # read file header
         line1 = self._get_row()
         if line1 != _correct_line1:
-            self.close()
             raise ValueError("Header record is not an XPORT file.")
 
         line2 = self._get_row()
         fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
         file_info = _split_line(line2, fif)
         if file_info["prefix"] != "SAS     SAS     SASLIB":
-            self.close()
             raise ValueError("Header record has invalid prefix.")
         file_info["created"] = _parse_date(file_info["created"])
         self.file_info = file_info
@@ -297,7 +295,6 @@ def _read_header(self):
         headflag1 = header1.startswith(_correct_header1)
         headflag2 = header2 == _correct_header2
         if not (headflag1 and headflag2):
-            self.close()
             raise ValueError("Member header not found")
         # usually 140, could be 135
         fieldnamelength = int(header1[-5:-2])
@@ -346,7 +343,6 @@ def _read_header(self):
             field["ntype"] = types[field["ntype"]]
             fl = field["field_length"]
             if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
-                self.close()
                 msg = f"Floating field width {fl} is not between 2 and 8."
                 raise TypeError(msg)
 
@@ -361,7 +357,6 @@ def _read_header(self):
 
         header = self._get_row()
         if not header == _correct_obs_header:
-            self.close()
             raise ValueError("Observation header not found.")
 
         self.fields = fields
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py
index 5680669f75aa3..6e957313d8de8 100644
--- a/pandas/tests/io/parser/test_compression.py
+++ b/pandas/tests/io/parser/test_compression.py
@@ -23,7 +23,7 @@ def parser_and_data(all_parsers, csv1):
 
     with open(csv1, "rb") as f:
         data = f.read()
-        expected = parser.read_csv(csv1)
+    expected = parser.read_csv(csv1)
 
     return parser, data, expected
 