
Commit 43b0560

Remove detailed comment; use temp path helper
1 parent e3bc03e commit 43b0560

1 file changed: +6 -16 lines changed


pandas/tests/io/test_parquet.py (+6, -16)
@@ -1131,27 +1131,17 @@ def test_infer_string_large_string_type(self, tmp_path, pa):
         # assert result["strings"].dtype == "string"
         # FIXME: don't leave commented-out

-    def test_non_nanosecond_timestamps(self, tmp_path, pa):
+    def test_non_nanosecond_timestamps(self):
         # GH#49236
-        #
-        # pandas 1.x didn't support non-nanosecond datetimes.
-        # pyarrow.Table.to_pandas supports timestamp_as_object param to solve that issue
-        # https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.to_pandas
-        #
-        # This test tests that the current version of pandas
-        # supports non-nanosecond (microsecond in this case) datetimes,
-        # the code example from GH#49236 doesn't fail anymore,
-        # and timestamp_as_object param is not needed.
         import pyarrow as pa
         import pyarrow.parquet as pq

-        path = tmp_path / "non_nanosecond_timestamp.p"
-
-        arr = pa.array([datetime.datetime(1600, 1, 1)], type=pa.timestamp("us"))
-        table = pa.table([arr], names=["timestamp"])
-        pq.write_table(table, path)
+        with tm.ensure_clean() as path:
+            arr = pa.array([datetime.datetime(1600, 1, 1)], type=pa.timestamp("us"))
+            table = pa.table([arr], names=["timestamp"])
+            pq.write_table(table, path)
+            result = read_parquet(path)

-        result = read_parquet(path)
         expected = pd.DataFrame(
             data={"timestamp": [datetime.datetime(1600, 1, 1)]},
             dtype="datetime64[us]",
