Skip to content

Commit f03b4a0

Browse files
Merge remote-tracking branch 'upstream/master' into GH28501
2 parents 9ac0c72 + 342640b commit f03b4a0

File tree

8 files changed: +58 −48 lines changed

pandas/io/excel/_base.py

+30-27
Original file line number | Diff line number | Diff line change
@@ -316,33 +316,36 @@ def read_excel(
316316
"an ExcelFile - ExcelFile already has the engine set"
317317
)
318318

319-
data = io.parse(
320-
sheet_name=sheet_name,
321-
header=header,
322-
names=names,
323-
index_col=index_col,
324-
usecols=usecols,
325-
squeeze=squeeze,
326-
dtype=dtype,
327-
converters=converters,
328-
true_values=true_values,
329-
false_values=false_values,
330-
skiprows=skiprows,
331-
nrows=nrows,
332-
na_values=na_values,
333-
keep_default_na=keep_default_na,
334-
na_filter=na_filter,
335-
verbose=verbose,
336-
parse_dates=parse_dates,
337-
date_parser=date_parser,
338-
thousands=thousands,
339-
comment=comment,
340-
skipfooter=skipfooter,
341-
convert_float=convert_float,
342-
mangle_dupe_cols=mangle_dupe_cols,
343-
)
344-
if should_close:
345-
io.close()
319+
try:
320+
data = io.parse(
321+
sheet_name=sheet_name,
322+
header=header,
323+
names=names,
324+
index_col=index_col,
325+
usecols=usecols,
326+
squeeze=squeeze,
327+
dtype=dtype,
328+
converters=converters,
329+
true_values=true_values,
330+
false_values=false_values,
331+
skiprows=skiprows,
332+
nrows=nrows,
333+
na_values=na_values,
334+
keep_default_na=keep_default_na,
335+
na_filter=na_filter,
336+
verbose=verbose,
337+
parse_dates=parse_dates,
338+
date_parser=date_parser,
339+
thousands=thousands,
340+
comment=comment,
341+
skipfooter=skipfooter,
342+
convert_float=convert_float,
343+
mangle_dupe_cols=mangle_dupe_cols,
344+
)
345+
finally:
346+
# make sure to close opened file handles
347+
if should_close:
348+
io.close()
346349
return data
347350

348351

pandas/io/formats/excel.py

+13-10
Original file line number | Diff line number | Diff line change
@@ -818,6 +818,7 @@ def write(
818818
f"Max sheet size is: {self.max_rows}, {self.max_cols}"
819819
)
820820

821+
formatted_cells = self.get_formatted_cells()
821822
if isinstance(writer, ExcelWriter):
822823
need_save = False
823824
else:
@@ -829,13 +830,15 @@ def write(
829830
)
830831
need_save = True
831832

832-
formatted_cells = self.get_formatted_cells()
833-
writer.write_cells(
834-
formatted_cells,
835-
sheet_name,
836-
startrow=startrow,
837-
startcol=startcol,
838-
freeze_panes=freeze_panes,
839-
)
840-
if need_save:
841-
writer.save()
833+
try:
834+
writer.write_cells(
835+
formatted_cells,
836+
sheet_name,
837+
startrow=startrow,
838+
startcol=startcol,
839+
freeze_panes=freeze_panes,
840+
)
841+
finally:
842+
# make sure to close opened file handles
843+
if need_save:
844+
writer.close()

pandas/io/json/_json.py

+1
Original file line number | Diff line number | Diff line change
@@ -631,6 +631,7 @@ def _preprocess_data(self, data):
631631
"""
632632
if hasattr(data, "read") and (not self.chunksize or not self.nrows):
633633
data = data.read()
634+
self.close()
634635
if not hasattr(data, "read") and (self.chunksize or self.nrows):
635636
data = StringIO(data)
636637

pandas/io/parsers.py

+2
Original file line number | Diff line number | Diff line change
@@ -2066,6 +2066,7 @@ def read(self, nrows=None):
20662066
return index, columns, col_dict
20672067

20682068
else:
2069+
self.close()
20692070
raise
20702071

20712072
# Done with first read, next time raise StopIteration
@@ -2449,6 +2450,7 @@ def read(self, rows=None):
24492450
if self._first_chunk:
24502451
content = []
24512452
else:
2453+
self.close()
24522454
raise
24532455

24542456
# done with first read, next time raise StopIteration

pandas/io/sas/sas7bdat.py

+1-3
Original file line number | Diff line number | Diff line change
@@ -203,7 +203,6 @@ def _get_properties(self):
203203
self._path_or_buf.seek(0)
204204
self._cached_page = self._path_or_buf.read(288)
205205
if self._cached_page[0 : len(const.magic)] != const.magic:
206-
self.close()
207206
raise ValueError("magic number mismatch (not a SAS file?)")
208207

209208
# Get alignment information
@@ -279,7 +278,6 @@ def _get_properties(self):
279278
buf = self._path_or_buf.read(self.header_length - 288)
280279
self._cached_page += buf
281280
if len(self._cached_page) != self.header_length:
282-
self.close()
283281
raise ValueError("The SAS7BDAT file appears to be truncated.")
284282

285283
self._page_length = self._read_int(
@@ -333,6 +331,7 @@ def _get_properties(self):
333331
def __next__(self):
334332
da = self.read(nrows=self.chunksize or 1)
335333
if da is None:
334+
self.close()
336335
raise StopIteration
337336
return da
338337

@@ -377,7 +376,6 @@ def _parse_metadata(self):
377376
if len(self._cached_page) <= 0:
378377
break
379378
if len(self._cached_page) != self._page_length:
380-
self.close()
381379
raise ValueError("Failed to read a meta data page from the SAS file.")
382380
done = self._process_page_meta()
383381

pandas/io/sas/sas_xport.py

-5
Original file line number | Diff line number | Diff line change
@@ -276,14 +276,12 @@ def _read_header(self):
276276
# read file header
277277
line1 = self._get_row()
278278
if line1 != _correct_line1:
279-
self.close()
280279
raise ValueError("Header record is not an XPORT file.")
281280

282281
line2 = self._get_row()
283282
fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
284283
file_info = _split_line(line2, fif)
285284
if file_info["prefix"] != "SAS SAS SASLIB":
286-
self.close()
287285
raise ValueError("Header record has invalid prefix.")
288286
file_info["created"] = _parse_date(file_info["created"])
289287
self.file_info = file_info
@@ -297,7 +295,6 @@ def _read_header(self):
297295
headflag1 = header1.startswith(_correct_header1)
298296
headflag2 = header2 == _correct_header2
299297
if not (headflag1 and headflag2):
300-
self.close()
301298
raise ValueError("Member header not found")
302299
# usually 140, could be 135
303300
fieldnamelength = int(header1[-5:-2])
@@ -346,7 +343,6 @@ def _read_header(self):
346343
field["ntype"] = types[field["ntype"]]
347344
fl = field["field_length"]
348345
if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
349-
self.close()
350346
msg = f"Floating field width {fl} is not between 2 and 8."
351347
raise TypeError(msg)
352348

@@ -361,7 +357,6 @@ def _read_header(self):
361357

362358
header = self._get_row()
363359
if not header == _correct_obs_header:
364-
self.close()
365360
raise ValueError("Observation header not found.")
366361

367362
self.fields = fields

pandas/tests/io/parser/test_compression.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -23,7 +23,7 @@ def parser_and_data(all_parsers, csv1):
2323

2424
with open(csv1, "rb") as f:
2525
data = f.read()
26-
expected = parser.read_csv(csv1)
26+
expected = parser.read_csv(csv1)
2727

2828
return parser, data, expected
2929

pandas/tests/plotting/test_converter.py

+10-2
Original file line number | Diff line number | Diff line change
@@ -7,6 +7,7 @@
77

88
import pandas._config.config as cf
99

10+
from pandas.compat import is_platform_windows
1011
from pandas.compat.numpy import np_datetime64_compat
1112
import pandas.util._test_decorators as td
1213

@@ -72,15 +73,22 @@ def test_registering_no_warning(self):
7273
ax.plot(s.index, s.values)
7374
plt.close()
7475

76+
@pytest.mark.xfail(
77+
is_platform_windows(),
78+
reason="Getting two warnings intermittently, see GH#37746",
79+
strict=False,
80+
)
7581
def test_pandas_plots_register(self):
7682
plt = pytest.importorskip("matplotlib.pyplot")
7783
s = Series(range(12), index=date_range("2017", periods=12))
7884
# Set to the "warn" state, in case this isn't the first test run
7985
with tm.assert_produces_warning(None) as w:
8086
s.plot()
8187

82-
assert len(w) == 0
83-
plt.close()
88+
try:
89+
assert len(w) == 0
90+
finally:
91+
plt.close()
8492

8593
def test_matplotlib_formatters(self):
8694
units = pytest.importorskip("matplotlib.units")

0 commit comments

Comments (0)