diff --git a/doc/source/whatsnew/v2.3.0.rst b/doc/source/whatsnew/v2.3.0.rst
index 8bdddb5b7f85d..831d8b802f363 100644
--- a/doc/source/whatsnew/v2.3.0.rst
+++ b/doc/source/whatsnew/v2.3.0.rst
@@ -137,6 +137,7 @@ MultiIndex
 I/O
 ^^^
 - :meth:`DataFrame.to_excel` was storing decimals as strings instead of numbers (:issue:`49598`)
+- Bug in :func:`read_sql` raising an unintended exception when byte data was converted to string with the pyarrow ``dtype_backend`` (:issue:`59242`)
 -
 
 Period
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index d8dd6441913b5..a90b914ae40fd 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -2260,7 +2260,9 @@ def type(self):
         elif pa.types.is_null(pa_type):
             # TODO: None? pd.NA? pa.null?
             return type(pa_type)
-        elif isinstance(pa_type, pa.ExtensionType):
+        elif isinstance(pa_type, pa.ExtensionType) or isinstance(
+            pa_type, pa.OpaqueType
+        ):
             return type(self)(pa_type.storage_type).type
 
         raise NotImplementedError(pa_type)
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 69da2be0306f6..c3e51b94e4d79 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -966,10 +966,6 @@ def convert(arr):
                     # i.e. maybe_convert_objects didn't convert
                     convert_to_nullable_dtype = dtype_backend != "numpy"
                     arr = maybe_infer_to_datetimelike(arr, convert_to_nullable_dtype)
-                    if convert_to_nullable_dtype and arr.dtype == np.dtype("O"):
-                        new_dtype = StringDtype()
-                        arr_cls = new_dtype.construct_array_type()
-                        arr = arr_cls._from_sequence(arr, dtype=new_dtype)
                 elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
                     if arr.dtype.kind in "iufb":
                         arr = pd_array(arr, copy=False)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 7e1220ecee218..95aebb2907d2c 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -4358,3 +4358,45 @@ def test_xsqlite_if_exists(sqlite_buildin):
         (5, "E"),
     ]
     drop_table(table_name, sqlite_buildin)
+
+
+@pytest.mark.parametrize("con", all_connectable)
+def test_bytes_column(con, dtype_backend, request):
+    # GH#59242
+    conn = request.getfixturevalue(con)
+    pa = pytest.importorskip("pyarrow")
+
+    hex_str = "0123456789abcdef0123456789abcdef"
+    val = bytes.fromhex(hex_str)
+    if "postgres" in con:
+        if "adbc" in con:
+            val = b"\x00\x00\x00\x80\x01#Eg\x89\xab\xcd\xef\x01#Eg\x89\xab\xcd\xef"
+        else:
+            val = (
+                "0000000100100011010001010110011110001001101010"
+                "11110011011110111100000001001000110100010101100"
+                "11110001001101010111100110111101111"
+            )
+
+    if dtype_backend == "pyarrow":
+        dtype = pd.ArrowDtype(pa.binary())
+        if "postgres" in con:
+            if "adbc" in con:
+                dtype = pd.ArrowDtype(pa.opaque(pa.binary(), "bit", "PostgreSQL"))
+            else:
+                dtype = pd.ArrowDtype(pa.string())
+    else:
+        dtype = "O"
+        if "postgres" in con and "psycopg2" in con:
+            if dtype_backend == "numpy_nullable":
+                dtype = pd.StringDtype()
+            elif dtype_backend == lib.no_default and pd.options.future.infer_string:
+                dtype = pd.StringDtype(storage="pyarrow", na_value=np.nan)
+
+    expected = DataFrame([{"a": val}], dtype=dtype)
+    df = pd.read_sql(
+        f"select x'{hex_str}' a",
+        conn,
+        dtype_backend=dtype_backend,
+    )
+    tm.assert_frame_equal(df, expected)
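
Note (not part of the patch): a minimal reproduction sketch of the scenario the new test covers, assuming pyarrow is installed and using an in-memory SQLite connection. SQLite's x'...' literal produces a BLOB, so read_sql returns raw bytes; before this change those bytes could be routed through the StringDtype conversion removed in construction.py, which raised.

# Hypothetical sketch for GH#59242; names below are illustrative, not from the patch.
import sqlite3

import pandas as pd

conn = sqlite3.connect(":memory:")
# The x'...' literal yields a BLOB, read back as Python bytes in column "a".
result = pd.read_sql("select x'0123456789abcdef' a", conn, dtype_backend="pyarrow")
# With the fix, the new test expects the column to come back as a pyarrow-backed
# binary dtype holding the original bytes rather than raising during conversion.
print(result.dtypes)
print(result["a"].iloc[0])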