
CLN: remove verbose and private_key reference #34640 #34722


Closed · wants to merge 3 commits
pandas/io/gbq.py (6 changes: 0 additions & 6 deletions)
@@ -31,8 +31,6 @@ def read_gbq(
     credentials=None,
     use_bqstorage_api: Optional[bool] = None,
     max_results: Optional[int] = None,
-    private_key=None,
-    verbose=None,
     progress_bar_type: Optional[str] = None,
 ) -> "DataFrame":
     """
@@ -208,8 +206,6 @@ def to_gbq(
     location: Optional[str] = None,
     progress_bar: bool = True,
     credentials=None,
-    verbose=None,
-    private_key=None,
 ) -> None:
     pandas_gbq = _try_import()
     pandas_gbq.to_gbq(
@@ -224,6 +220,4 @@ def to_gbq(
         location=location,
         progress_bar=progress_bar,
         credentials=credentials,
-        verbose=verbose,
-        private_key=private_key,
     )
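
The removed `private_key` keyword is superseded by the `credentials` argument already visible in the context lines above, which `read_gbq`/`to_gbq` pass straight through to pandas-gbq. A minimal sketch of the credentials-based call (the key-file path, query, and project id are placeholders, not taken from this PR):

```python
# Sketch only: authenticate with an explicit google-auth credentials object
# instead of the removed private_key argument.
# Assumes google-auth and pandas-gbq are installed; "key.json" is a
# placeholder service-account key file.
import pandas as pd
from google.oauth2 import service_account

creds = service_account.Credentials.from_service_account_file("key.json")

df = pd.read_gbq(
    "SELECT 1 AS x",          # placeholder query
    project_id="my-project",  # placeholder project id
    credentials=creds,
)
```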
pandas/io/parsers.py (11 changes: 2 additions & 9 deletions)
@@ -215,8 +215,6 @@
     Detect missing value markers (empty strings and the value of na_values). In
     data without any NAs, passing na_filter=False can improve the performance
     of reading a large file.
-verbose : bool, default False
-    Indicate number of NA values placed in non-numeric columns.
 skip_blank_lines : bool, default True
     If True, skip over blank lines rather than interpreting as NaN values.
 parse_dates : bool or list of int or names or list of lists or dict, \
@@ -498,7 +496,6 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
     "usecols": None,
     # 'iterator': False,
     "chunksize": None,
-    "verbose": False,
     "encoding": None,
     "squeeze": False,
     "compression": None,
@@ -554,7 +551,6 @@ def parser_f(
         na_values=None,
         keep_default_na=True,
         na_filter=True,
-        verbose=False,
         skip_blank_lines=True,
         # Datetime Handling
         parse_dates=False,
@@ -658,7 +654,6 @@ def parser_f(
            converters=converters,
            dtype=dtype,
            usecols=usecols,
-           verbose=verbose,
            encoding=encoding,
            squeeze=squeeze,
            memory_map=memory_map,
@@ -1710,7 +1705,7 @@ def _agg_index(self, index, try_parse_dates=True):
         return index

     def _convert_to_ndarrays(
-        self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
+        self, dct, na_values, na_fvalues, converters=None, dtypes=None
     ):
         result = {}
         for c, values in dct.items():
@@ -1780,7 +1775,7 @@ def _convert_to_ndarrays(
                 cvals = self._cast_types(cvals, cast_type, c)

             result[c] = cvals
-            if verbose and na_count:
+            if na_count:
                 print(f"Filled {na_count} NA values in column {c!s}")
         return result

@@ -2303,7 +2298,6 @@ def __init__(self, f, **kwds):
         if "has_index_names" in kwds:
             self.has_index_names = kwds["has_index_names"]

-        self.verbose = kwds["verbose"]
         self.converters = kwds["converters"]

         self.dtype = kwds["dtype"]
@@ -2588,7 +2582,6 @@ def _clean_mapping(mapping):
             data,
             clean_na_values,
             clean_na_fvalues,
-            self.verbose,
             clean_conv,
             clean_dtypes,
         )
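
With `verbose` removed from the parser keywords, a caller who relied on `verbose=True` to report per-column NA fills can recover similar information from the parsed frame itself. A minimal sketch (the inline CSV is placeholder data, not from this PR):

```python
# Sketch only: approximate the per-column NA report that verbose=True
# used to print, by counting missing values after parsing.
import io

import pandas as pd

csv = io.StringIO("a,b\n1,x\n,y\n3,\n")  # placeholder data with blank fields
df = pd.read_csv(csv)

for col, count in df.isna().sum().items():
    if count:
        print(f"Filled {count} NA values in column {col}")
```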