diff --git a/pandas/_config/display.py b/pandas/_config/display.py index ef319f4447565..e4553a2107f87 100644 --- a/pandas/_config/display.py +++ b/pandas/_config/display.py @@ -22,7 +22,7 @@ def detect_console_encoding() -> str: encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding - except (AttributeError, IOError): + except (AttributeError, OSError): pass # try again for something better diff --git a/pandas/_testing.py b/pandas/_testing.py index 9db0c3496e290..cd34bec52daef 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1960,8 +1960,7 @@ def index_subclass_makers_generator(): makeCategoricalIndex, makeMultiIndex, ] - for make_index_func in make_index_funcs: - yield make_index_func + yield from make_index_funcs def all_timeseries_index_generator(k=10): diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py index 53df8da175a56..129d8998faccc 100644 --- a/pandas/_vendored/typing_extensions.py +++ b/pandas/_vendored/typing_extensions.py @@ -409,7 +409,7 @@ def __repr__(self): def __getitem__(self, parameters): item = typing._type_check( - parameters, "{} accepts only single type".format(self._name) + parameters, f"{self._name} accepts only single type" ) return _GenericAlias(self, (item,)) @@ -1671,7 +1671,7 @@ def __class_getitem__(cls, params): params = (params,) if not params and cls is not Tuple: raise TypeError( - "Parameter list to {}[...] cannot be empty".format(cls.__qualname__) + f"Parameter list to {cls.__qualname__}[...] cannot be empty" ) msg = "Parameters to generic types must be types." params = tuple(_type_check(p, msg) for p in params) @@ -2113,7 +2113,7 @@ def __class_getitem__(cls, params): return _AnnotatedAlias(origin, metadata) def __init_subclass__(cls, *args, **kwargs): - raise TypeError("Cannot subclass {}.Annotated".format(cls.__module__)) + raise TypeError(f"Cannot subclass {cls.__module__}.Annotated") def _strip_annotations(t): """Strips the annotations from a given type. @@ -2195,7 +2195,7 @@ def _tree_repr(self, tree): else: tp_repr = origin[0]._tree_repr(origin) metadata_reprs = ", ".join(repr(arg) for arg in metadata) - return "%s[%s, %s]" % (cls, tp_repr, metadata_reprs) + return f"{cls}[{tp_repr}, {metadata_reprs}]" def _subs_tree(self, tvars=None, args=None): # noqa if self is Annotated: @@ -2382,7 +2382,7 @@ def TypeAlias(self, parameters): It's invalid when used anywhere except as in the example above. """ - raise TypeError("{} is not subscriptable".format(self)) + raise TypeError(f"{self} is not subscriptable") elif sys.version_info[:2] >= (3, 7): diff --git a/pandas/_version.py b/pandas/_version.py index 66e756a4744c8..b3fa8530d09eb 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -74,7 +74,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): stderr=(subprocess.PIPE if hide_stderr else None), ) break - except EnvironmentError: + except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue @@ -121,7 +121,7 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs, "r") + f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -132,7 +132,7 @@ def git_get_keywords(versionfile_abs): if mo: keywords["full"] = mo.group(1) f.close() - except EnvironmentError: + except OSError: pass return keywords diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index d16955a98b62f..a8020f4bb4e4f 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -274,7 +274,7 @@ def copy_dev_clipboard(text): fo.write(text) def paste_dev_clipboard() -> str: - with open("/dev/clipboard", "rt") as fo: + with open("/dev/clipboard") as fo: content = fo.read() return content @@ -521,7 +521,7 @@ def determine_clipboard(): return init_windows_clipboard() if platform.system() == "Linux": - with open("/proc/version", "r") as f: + with open("/proc/version") as f: if "Microsoft" in f.read(): return init_wsl_clipboard() diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index bf4586a4b5b96..cc7b6b0bfea97 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -587,8 +587,7 @@ def _format_regular_rows(self): else: coloffset = 0 - for cell in self._generate_body(coloffset): - yield cell + yield from self._generate_body(coloffset) def _format_hierarchical_rows(self): has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex)) @@ -664,8 +663,7 @@ def _format_hierarchical_rows(self): ) gcolidx += 1 - for cell in self._generate_body(gcolidx): - yield cell + yield from self._generate_body(gcolidx) def _generate_body(self, coloffset: int): if self.styler is None: diff --git a/pandas/io/html.py b/pandas/io/html.py index 40fde224a7ae9..9a91b16e52723 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -719,7 +719,7 @@ def _build_doc(self): r = r.getroot() except AttributeError: pass - except (UnicodeDecodeError, IOError) as e: + except (UnicodeDecodeError, OSError) as e: # if the input is a blob of html goop if not is_url(self.io): r = fromstring(self.io, parser=parser) diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index c3977f89ac42f..a0ceb18c8bd20 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -821,7 +821,7 @@ def close(self): if self.should_close: try: self.open_stream.close() - except (IOError, AttributeError): + except (OSError, AttributeError): pass for file_handle in self.file_handles: file_handle.close() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index e850a101a0a63..5e5a89d96f0e5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -364,7 +364,7 @@ def read_hdf( if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: - raise IOError("The HDFStore must be open for reading.") + raise OSError("The HDFStore must be open for reading.") store = path_or_buf auto_close = False @@ -693,7 +693,7 @@ def open(self, mode: str = "a", **kwargs): try: self._handle = tables.open_file(self._path, self._mode, **kwargs) - except IOError as err: # pragma: no cover + except OSError as err: # pragma: no cover if "can not be written" in str(err): print(f"Opening {self._path} in read-only mode") self._handle = tables.open_file(self._path, "r", **kwargs) @@ -724,7 +724,7 @@ def open(self, mode: str = "a", **kwargs): # trying to read from a non-existent file causes an error which # is not part of IOError, make it one if self._mode == "r" and "Unable to open/create file" in str(err): - raise 
IOError(str(err)) from err + raise OSError(str(err)) from err raise def close(self): diff --git a/pandas/io/stata.py b/pandas/io/stata.py index df5f6c3d53d30..a8af84e42918d 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1077,7 +1077,7 @@ def close(self) -> None: """ close the handle if its open """ try: self.path_or_buf.close() - except IOError: + except OSError: pass def _set_encoding(self) -> None: diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 602b42022f561..0c64ea824996f 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -342,7 +342,7 @@ def _setup_subplots(self): valid_log = {False, True, "sym", None} input_log = {self.logx, self.logy, self.loglog} if input_log - valid_log: - invalid_log = next(iter((input_log - valid_log))) + invalid_log = next(iter(input_log - valid_log)) raise ValueError( f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given." ) diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index 89fbfbd5b8324..e200f13652a84 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -277,7 +277,7 @@ def test_constructor_with_generator(self): # returned a scalar for a generator exp = Categorical([0, 1, 2]) - cat = Categorical((x for x in [0, 1, 2])) + cat = Categorical(x for x in [0, 1, 2]) tm.assert_categorical_equal(cat, exp) cat = Categorical(range(3)) tm.assert_categorical_equal(cat, exp) diff --git a/pandas/tests/arrays/integer/test_construction.py b/pandas/tests/arrays/integer/test_construction.py index 1893c4554bfbf..e0a4877da6c7e 100644 --- a/pandas/tests/arrays/integer/test_construction.py +++ b/pandas/tests/arrays/integer/test_construction.py @@ -29,7 +29,7 @@ def test_from_dtype_from_float(data): # from int / array expected = pd.Series(data).dropna().reset_index(drop=True) - dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) + dropped = np.array(data.dropna()).astype(np.dtype(dtype.type)) result = pd.Series(dropped, dtype=str(dtype)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 2fbeec8dd8378..9147360e71c73 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -167,7 +167,7 @@ def _na_value(self): def _formatter(self, boxed=False): if boxed: - return "Decimal: {0}".format + return "Decimal: {}".format return repr @classmethod diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index f7b572a70073a..7d03dadb20dd9 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -137,7 +137,7 @@ def test_combine_add(self, data_repeated): s2 = pd.Series(orig_data2) result = s1.combine(s2, lambda x1, x2: x1 + x2) expected = pd.Series( - ([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]) + [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))] ) self.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 63a2160e128ed..b5e211895672a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -71,7 +71,7 @@ def test_series_with_name_not_matching_column(self): lambda: DataFrame({}), lambda: DataFrame(()), lambda: DataFrame([]), - 
lambda: DataFrame((_ for _ in [])), + lambda: DataFrame(_ for _ in []), lambda: DataFrame(range(0)), lambda: DataFrame(data=None), lambda: DataFrame(data={}), diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 1bb40b322cd48..6783fc5b66433 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -249,8 +249,8 @@ def test_len(): # issue 11016 df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3])) - assert len(df.groupby(("a"))) == 0 - assert len(df.groupby(("b"))) == 3 + assert len(df.groupby("a")) == 0 + assert len(df.groupby("b")) == 3 assert len(df.groupby(["a", "b"])) == 3 diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 40b4ce46e550b..18ef95c05f291 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -739,7 +739,7 @@ def test_get_group(self): with pytest.raises(ValueError, match=msg): g.get_group("foo") with pytest.raises(ValueError, match=msg): - g.get_group(("foo")) + g.get_group("foo") msg = "must supply a same-length tuple to get_group with multiple grouping keys" with pytest.raises(ValueError, match=msg): g.get_group(("foo", "bar", "baz")) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7720db9d98ebf..f811bd579aaaa 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1360,7 +1360,7 @@ def test_get_indexer_strings_raises(self): def test_get_indexer_numeric_index_boolean_target(self, idx_class): # GH 16877 - numeric_index = idx_class(RangeIndex((4))) + numeric_index = idx_class(RangeIndex(4)) result = numeric_index.get_indexer([True, False, True]) expected = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index ca8a3ddc95575..0cc61cd7df389 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -745,7 +745,7 @@ def run_tests(df, rhs, right): # make frames multi-type & re-run tests for frame in [df, rhs, right]: frame["joe"] = frame["joe"].astype("float64") - frame["jolie"] = frame["jolie"].map("@{0}".format) + frame["jolie"] = frame["jolie"].map("@{}".format) run_tests(df, rhs, right) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index f00fa6274fca2..cce0783a3c867 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -648,7 +648,7 @@ def test_to_string_unicode_columns(self, float_frame): assert isinstance(result, str) def test_to_string_utf8_columns(self): - n = "\u05d0".encode("utf-8") + n = "\u05d0".encode() with option_context("display.max_rows", 1): df = DataFrame([1, 2], columns=[n]) diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index c40935b2cc5dd..e2ceb95d77053 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -26,7 +26,7 @@ def test_to_csv_with_single_column(self): """ with tm.ensure_clean("test.csv") as path: df1.to_csv(path, header=None, index=None) - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected1 df2 = DataFrame([1, None]) @@ -36,7 +36,7 @@ def test_to_csv_with_single_column(self): """ with tm.ensure_clean("test.csv") as path: df2.to_csv(path, header=None, index=None) - with open(path, "r") as f: + with open(path) as f: assert f.read() 
== expected2 def test_to_csv_defualt_encoding(self): @@ -58,7 +58,7 @@ def test_to_csv_quotechar(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=1) # 1=QUOTE_ALL - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected expected = """\ @@ -69,7 +69,7 @@ def test_to_csv_quotechar(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=1, quotechar="$") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected with tm.ensure_clean("test.csv") as path: @@ -86,7 +86,7 @@ def test_to_csv_doublequote(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected from _csv import Error @@ -105,7 +105,7 @@ def test_to_csv_escapechar(self): with tm.ensure_clean("test.csv") as path: # QUOTE_ALL df.to_csv(path, quoting=1, doublequote=False, escapechar="\\") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected df = DataFrame({"col": ["a,a", ",bb,"]}) @@ -117,7 +117,7 @@ def test_to_csv_escapechar(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected def test_csv_to_string(self): @@ -342,7 +342,7 @@ def test_to_csv_string_array_ascii(self): """ with tm.ensure_clean("str_test.csv") as path: df.to_csv(path, encoding="ascii") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected_ascii def test_to_csv_string_array_utf8(self): @@ -356,7 +356,7 @@ def test_to_csv_string_array_utf8(self): """ with tm.ensure_clean("unicode_test.csv") as path: df.to_csv(path, encoding="utf-8") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected_utf8 def test_to_csv_string_with_lf(self): @@ -467,7 +467,7 @@ def test_to_csv_write_to_open_file(self): with open(path, "w") as f: f.write("manual header\n") df.to_csv(f, header=None, index=None) - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected def test_to_csv_write_to_open_file_with_newline_py3(self): diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index e85fd398964d0..7acdbfd462874 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -137,7 +137,7 @@ def test_to_html_encoding(float_frame, tmp_path): # GH 28663 path = tmp_path / "test.html" float_frame.to_html(path, encoding="gbk") - with open(str(path), "r", encoding="gbk") as f: + with open(str(path), encoding="gbk") as f: assert float_frame.to_html() == f.read() diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index a98644250b328..a93ab6f9cc7aa 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -21,7 +21,7 @@ def test_to_latex_filename(self, float_frame): with tm.ensure_clean("test.tex") as path: float_frame.to_latex(path) - with open(path, "r") as f: + with open(path) as f: assert float_frame.to_latex() == f.read() # test with utf-8 and encoding option (GH 7061) diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index e2007e07c572a..086c0b7ba08b2 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -591,14 +591,14 @@ def test_decode_number_with_32bit_sign_bit(self, val): def test_encode_big_escape(self): # Make 
sure no Exception is raised. for _ in range(10): - base = "\u00e5".encode("utf-8") + base = "\u00e5".encode() escape_input = base * 1024 * 1024 * 2 ujson.encode(escape_input) def test_decode_big_escape(self): # Make sure no Exception is raised. for _ in range(10): - base = "\u00e5".encode("utf-8") + base = "\u00e5".encode() quote = b'"' escape_input = quote + (base * 1024 * 1024 * 2) + quote diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 7c58afe867440..ae63b6af3a8b6 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -577,7 +577,7 @@ def test_file_handles_mmap(c_parser_only, csv1): # Don't close user provided file handles. parser = c_parser_only - with open(csv1, "r") as f: + with open(csv1) as f: m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) parser.read_csv(m) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 1d8d5a29686a4..49358fe2ecfe4 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1726,7 +1726,7 @@ def test_iteration_open_handle(all_parsers): with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") - with open(path, "r") as f: + with open(path) as f: for line in f: if "CCC" in line: break diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index de7b3bed034c7..f23b498c7388a 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -27,7 +27,7 @@ def test_bytes_io_input(all_parsers): def test_read_csv_unicode(all_parsers): parser = all_parsers - data = BytesIO("\u0141aski, Jan;1".encode("utf-8")) + data = BytesIO("\u0141aski, Jan;1".encode()) result = parser.read_csv(data, sep=";", encoding="utf-8", header=None) expected = DataFrame([["\u0141aski, Jan", 1]]) diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index e982667f06f31..127d0dc4c9829 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -173,9 +173,7 @@ def test_read_csv_compat(): def test_bytes_io_input(): - result = read_fwf( - BytesIO("שלום\nשלום".encode("utf8")), widths=[2, 2], encoding="utf8" - ) + result = read_fwf(BytesIO("שלום\nשלום".encode()), widths=[2, 2], encoding="utf8") expected = DataFrame([["של", "ום"]], columns=["של", "ום"]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py index aad18890de3ad..7e7a76e287d32 100644 --- a/pandas/tests/io/pytables/common.py +++ b/pandas/tests/io/pytables/common.py @@ -25,7 +25,7 @@ def safe_close(store): try: if store is not None: store.close() - except IOError: + except OSError: pass diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 85a12a13d19fb..ede8d61490778 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -339,7 +339,7 @@ def test_constructor_bad_file(self, mmap_file): with pytest.raises(err, match=msg): icom._MMapWrapper(non_file) - target = open(mmap_file, "r") + target = open(mmap_file) target.close() msg = "I/O operation on closed file" @@ -347,7 +347,7 @@ def test_constructor_bad_file(self, mmap_file): icom._MMapWrapper(target) def test_get_attr(self, mmap_file): - with open(mmap_file, "r") as target: + with open(mmap_file) as target: wrapper = icom._MMapWrapper(target) attrs = dir(wrapper.mmap) @@ -360,7 +360,7 
@@ def test_get_attr(self, mmap_file): assert not hasattr(wrapper, "foo") def test_next(self, mmap_file): - with open(mmap_file, "r") as target: + with open(mmap_file) as target: wrapper = icom._MMapWrapper(target) lines = target.readlines() diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 2c93dbb5b6b83..59034e9f3d807 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -114,7 +114,7 @@ def test_to_html_compat(self): c_idx_names=False, r_idx_names=False, ) - .applymap("{0:.3f}".format) + .applymap("{:.3f}".format) .astype(float) ) out = df.to_html() @@ -616,7 +616,7 @@ def try_remove_ws(x): @pytest.mark.slow def test_gold_canyon(self): gc = "Gold Canyon" - with open(self.banklist_data, "r") as f: + with open(self.banklist_data) as f: raw_text = f.read() assert gc in raw_text diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 1edcc937f72c3..32a15e6201037 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -281,7 +281,6 @@ def _get_exec(self): @pytest.fixture(params=[("io", "data", "csv", "iris.csv")]) def load_iris_data(self, datapath, request): - import io iris_csv_file = datapath(*request.param) @@ -291,7 +290,7 @@ def load_iris_data(self, datapath, request): self.drop_table("iris") self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor]) - with io.open(iris_csv_file, mode="r", newline=None) as iris_csv: + with open(iris_csv_file, mode="r", newline=None) as iris_csv: r = csv.reader(iris_csv) next(r) # skip header row ins = SQL_STRINGS["insert_iris"][self.flavor] diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index ce13762ea8f86..82e0e52c089a2 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -386,7 +386,7 @@ def test_dataframe_dummies_with_categorical(self, df, sparse, dtype): "get_dummies_kwargs,expected", [ ( - {"data": DataFrame(({"ä": ["a"]}))}, + {"data": DataFrame({"ä": ["a"]})}, DataFrame({"ä_a": [1]}, dtype=np.uint8), ), ( diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index 0a7dfbee4e672..5c4d7e191d1bb 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -28,9 +28,9 @@ def test_format(): assert format(NA, ">10") == " " assert format(NA, "xxx") == "" # NA is flexible, accept any format spec - assert "{}".format(NA) == "" - assert "{:>10}".format(NA) == " " - assert "{:xxx}".format(NA) == "" + assert f"{NA}" == "" + assert f"{NA:>10}" == " " + assert f"{NA:xxx}" == "" def test_truthiness(): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index e39083b709f38..6ba55ce3c74b9 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -180,7 +180,7 @@ def test_td64_summation_overflow(self): # mean result = (s - s.min()).mean() - expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum()) + expected = pd.Timedelta((pd.TimedeltaIndex(s - s.min()).asi8 / len(s)).sum()) # the computation is converted to float so # might be some loss of precision diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 8ac0a55e63cd1..1b5fddaf14335 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -49,7 +49,7 @@ class TestSeriesConstructors: (lambda: Series({}), True), (lambda: Series(()), False), # 
creates a RangeIndex (lambda: Series([]), False), # creates a RangeIndex - (lambda: Series((_ for _ in [])), False), # creates a RangeIndex + (lambda: Series(_ for _ in []), False), # creates a RangeIndex (lambda: Series(data=None), True), (lambda: Series(data={}), True), (lambda: Series(data=()), False), # creates a RangeIndex @@ -222,8 +222,7 @@ def test_constructor_iterable(self): # GH 21987 class Iter: def __iter__(self): - for i in range(10): - yield i + yield from range(10) expected = Series(list(range(10)), dtype="int64") result = Series(Iter(), dtype="int64") diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index bcc0b18134dad..ae89e16ca7667 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -137,13 +137,13 @@ def test_astype_str_cast_dt64(self): ts = Series([Timestamp("2010-01-04 00:00:00")]) s = ts.astype(str) - expected = Series([str("2010-01-04")]) + expected = Series(["2010-01-04"]) tm.assert_series_equal(s, expected) ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")]) s = ts.astype(str) - expected = Series([str("2010-01-04 00:00:00-05:00")]) + expected = Series(["2010-01-04 00:00:00-05:00"]) tm.assert_series_equal(s, expected) def test_astype_str_cast_td64(self): @@ -152,7 +152,7 @@ def test_astype_str_cast_td64(self): td = Series([Timedelta(1, unit="d")]) ser = td.astype(str) - expected = Series([str("1 days")]) + expected = Series(["1 days"]) tm.assert_series_equal(ser, expected) def test_astype_unicode(self): @@ -167,7 +167,7 @@ def test_astype_unicode(self): former_encoding = None if sys.getdefaultencoding() == "utf-8": - test_series.append(Series(["野菜食べないとやばい".encode("utf-8")])) + test_series.append(Series(["野菜食べないとやばい".encode()])) for s in test_series: res = s.astype("unicode") diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 708118e950686..b12ebd58e6a7b 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -66,12 +66,11 @@ def test_from_csv(self, datetime_series, string_series): tm.assert_series_equal(check_series, series) def test_to_csv(self, datetime_series): - import io with tm.ensure_clean() as path: datetime_series.to_csv(path, header=False) - with io.open(path, newline=None) as f: + with open(path, newline=None) as f: lines = f.readlines() assert lines[1] != "\n" diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index 62ec6b9ef07af..b654e27737359 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -212,7 +212,7 @@ def find_titles(rst_file: str) -> Iterable[Tuple[str, int]]: The corresponding line number of the heading. 
""" - with open(rst_file, "r") as fd: + with open(rst_file) as fd: previous_line = "" for i, line in enumerate(fd): line = line[:-1] @@ -250,10 +250,9 @@ def find_rst_files(source_paths: List[str]) -> Iterable[str]: elif directory_address.endswith(".rst"): yield directory_address else: - for filename in glob.glob( + yield from glob.glob( pathname=f"{directory_address}/**/*.rst", recursive=True - ): - yield filename + ) def main(source_paths: List[str], output_format: str) -> int: diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 4a0e859535215..b6ffab1482bbc 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -447,7 +447,7 @@ def main( if os.path.isfile(source_path): file_path = source_path - with open(file_path, "r") as file_obj: + with open(file_path) as file_obj: for line_number, msg in function(file_obj): is_failed = True print( @@ -466,7 +466,7 @@ def main( continue file_path = os.path.join(subdir, file_name) - with open(file_path, "r") as file_obj: + with open(file_path) as file_obj: for line_number, msg in function(file_obj): is_failed = True print( diff --git a/setup.py b/setup.py index a8dfeb0974195..8f447d5c38169 100755 --- a/setup.py +++ b/setup.py @@ -99,7 +99,7 @@ def render_templates(cls, pxifiles): # if .pxi.in is not updated, no need to output .pxi continue - with open(pxifile, "r") as f: + with open(pxifile) as f: tmpl = f.read() pyxcontent = tempita.sub(tmpl) diff --git a/versioneer.py b/versioneer.py index 5882349f65f0b..65c9523ba5573 100644 --- a/versioneer.py +++ b/versioneer.py @@ -349,7 +349,7 @@ import sys -class VersioneerConfig(object): +class VersioneerConfig: pass @@ -398,7 +398,7 @@ def get_config_from_root(root): # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: + with open(setup_cfg) as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory @@ -451,7 +451,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): stderr=(subprocess.PIPE if hide_stderr else None), ) break - except EnvironmentError: + except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue @@ -461,7 +461,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): return None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print(f"unable to find command, tried {commands}") return None stdout = p.communicate()[0].strip().decode() @@ -946,7 +946,7 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs, "r") + f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -957,7 +957,7 @@ def git_get_keywords(versionfile_abs): if mo: keywords["full"] = mo.group(1) f.close() - except EnvironmentError: + except OSError: pass return keywords @@ -1072,9 +1072,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( - full_tag, - tag_prefix, + pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format( + full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] @@ -1111,13 +1110,13 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): files.append(versioneer_file) present = False try: - f = open(".gitattributes", "r") + f = open(".gitattributes") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() - except EnvironmentError: + except OSError: pass if not present: f = open(".gitattributes", "a+") @@ -1171,7 +1170,7 @@ def versions_from_file(filename): try: with open(filename) as f: contents = f.read() - except EnvironmentError: + except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search( r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S @@ -1187,7 +1186,7 @@ def write_to_version_file(filename, versions): with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) - print("set %s to '%s'" % (filename, versions["version"])) + print("set {} to '{}'".format(filename, versions["version"])) def plus_or_dot(pieces): @@ -1399,7 +1398,7 @@ def get_versions(verbose=False): try: ver = versions_from_file(versionfile_abs) if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) + print(f"got version from file {versionfile_abs} {ver}") return ver except NotThisMethod: pass @@ -1619,11 +1618,7 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except ( - EnvironmentError, - configparser.NoSectionError, - configparser.NoOptionError, - ) as e: + except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: @@ -1648,9 +1643,9 @@ def do_setup(): ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: - with open(ipy, "r") as f: + with open(ipy) as f: old = f.read() - except EnvironmentError: + except OSError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) @@ -1669,12 +1664,12 @@ def do_setup(): manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: - with open(manifest_in, "r") as f: + with open(manifest_in) as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) - except EnvironmentError: + except OSError: pass # That doesn't cover everything MANIFEST.in can do # (https://docs.python.org/2/distutils/sourcedist.html#commands), so @@ -1707,7 +1702,7 @@ def scan_setup_py(): found = set() setters = False errors = 0 - with open("setup.py", "r") as f: + with open("setup.py") as f: for line in f.readlines(): if "import 
versioneer" in line: found.add("import")