@@ -378,25 +378,25 @@ def test_read_duplicate_index_implicit(all_parsers):
     [
         (
             "A,B\nTrue,1\nFalse,2\nTrue,3",
-            dict(),
+            {},
             DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
         ),
         (
             "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
-            dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
+            {"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
             DataFrame(
                 [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
                 columns=["A", "B"],
             ),
         ),
         (
             "A,B\nTRUE,1\nFALSE,2\nTRUE,3",
-            dict(),
+            {},
             DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
         ),
         (
             "A,B\nfoo,bar\nbar,foo",
-            dict(true_values=["foo"], false_values=["bar"]),
+            {"true_values": ["foo"], "false_values": ["bar"]},
             DataFrame([[True, False], [False, True]], columns=["A", "B"]),
         ),
     ],
@@ -520,7 +520,7 @@ def test_read_chunksize_and_nrows(all_parsers, chunksize):
 bar2,12,13,14,15
 """
     parser = all_parsers
-    kwargs = dict(index_col=0, nrows=5)
+    kwargs = {"index_col": 0, "nrows": 5}

     expected = parser.read_csv(StringIO(data), **kwargs)
     with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
@@ -537,7 +537,7 @@ def test_read_chunksize_and_nrows_changing_size(all_parsers):
 bar2,12,13,14,15
 """
    parser = all_parsers
-    kwargs = dict(index_col=0, nrows=5)
+    kwargs = {"index_col": 0, "nrows": 5}

     expected = parser.read_csv(StringIO(data), **kwargs)
     with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
@@ -563,7 +563,7 @@ def test_get_chunk_passed_chunksize(all_parsers):
     tm.assert_frame_equal(result, expected)


-@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
+@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
 def test_read_chunksize_compat(all_parsers, kwargs):
     # see gh-12185
     data = """index,A,B,C,D
@@ -593,7 +593,7 @@ def test_read_chunksize_jagged_names(all_parsers):

 def test_read_data_list(all_parsers):
     parser = all_parsers
-    kwargs = dict(index_col=0)
+    kwargs = {"index_col": 0}
     data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"

     data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
@@ -616,7 +616,7 @@ def test_iterator(all_parsers):
 bar2,12,13,14,15
 """
     parser = all_parsers
-    kwargs = dict(index_col=0)
+    kwargs = {"index_col": 0}

     expected = parser.read_csv(StringIO(data), **kwargs)
     with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
@@ -657,7 +657,7 @@ def test_reader_list(all_parsers):
 bar2,12,13,14,15
 """
     parser = all_parsers
-    kwargs = dict(index_col=0)
+    kwargs = {"index_col": 0}

     lines = list(csv.reader(StringIO(data)))
     with TextParser(lines, chunksize=2, **kwargs) as reader:
@@ -680,7 +680,7 @@ def test_reader_list_skiprows(all_parsers):
 bar2,12,13,14,15
 """
     parser = all_parsers
-    kwargs = dict(index_col=0)
+    kwargs = {"index_col": 0}

     lines = list(csv.reader(StringIO(data)))
     with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
@@ -713,7 +713,7 @@ def test_iterator_stop_on_chunksize(all_parsers):


 @pytest.mark.parametrize(
-    "kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
+    "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
 )
 def test_iterator_skipfooter_errors(all_parsers, kwargs):
     msg = "'skipfooter' not supported for iteration"
@@ -745,7 +745,7 @@ def test_nrows_skipfooter_errors(all_parsers):
 foo2,12,13,14,15
 bar2,12,13,14,15
 """,
-            dict(index_col=0, names=["index", "A", "B", "C", "D"]),
+            {"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
             DataFrame(
                 [
                     [2, 3, 4, 5],
@@ -766,7 +766,7 @@ def test_nrows_skipfooter_errors(all_parsers):
 bar,one,12,13,14,15
 bar,two,12,13,14,15
 """,
-            dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
+            {"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
             DataFrame(
                 [
                     [2, 3, 4, 5],
@@ -906,7 +906,7 @@ def test_read_csv_parse_simple_list(all_parsers):
 def test_url(all_parsers, csv_dir_path):
     # TODO: FTP testing
     parser = all_parsers
-    kwargs = dict(sep="\t")
+    kwargs = {"sep": "\t"}

     url = (
         "https://raw.github.com/pandas-dev/pandas/master/"
@@ -922,7 +922,7 @@ def test_url(all_parsers, csv_dir_path):
 @pytest.mark.slow
 def test_local_file(all_parsers, csv_dir_path):
     parser = all_parsers
-    kwargs = dict(sep="\t")
+    kwargs = {"sep": "\t"}

     local_path = os.path.join(csv_dir_path, "salaries.csv")
     local_result = parser.read_csv(local_path, **kwargs)
@@ -1374,77 +1374,77 @@ def test_empty_with_nrows_chunksize(all_parsers, iterator):
         # gh-10728: WHITESPACE_LINE
         (
             "a,b,c\n4,5,6\n ",
-            dict(),
+            {},
             DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
             None,
         ),
         # gh-10548: EAT_LINE_COMMENT
         (
             "a,b,c\n4,5,6\n#comment",
-            dict(comment="#"),
+            {"comment": "#"},
             DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
             None,
         ),
         # EAT_CRNL_NOP
         (
             "a,b,c\n4,5,6\n\r",
-            dict(),
+            {},
             DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
             None,
         ),
         # EAT_COMMENT
         (
             "a,b,c\n4,5,6#comment",
-            dict(comment="#"),
+            {"comment": "#"},
             DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
             None,
         ),
         # SKIP_LINE
         (
             "a,b,c\n4,5,6\nskipme",
-            dict(skiprows=[2]),
+            {"skiprows": [2]},
             DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
             None,
         ),
         # EAT_LINE_COMMENT
         (
             "a,b,c\n4,5,6\n#comment",
-            dict(comment="#", skip_blank_lines=False),
+            {"comment": "#", "skip_blank_lines": False},
             DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
             None,
         ),
         # IN_FIELD
         (
             "a,b,c\n4,5,6\n ",
-            dict(skip_blank_lines=False),
+            {"skip_blank_lines": False},
             DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
             None,
         ),
         # EAT_CRNL
         (
             "a,b,c\n4,5,6\n\r",
-            dict(skip_blank_lines=False),
+            {"skip_blank_lines": False},
             DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
             None,
         ),
         # ESCAPED_CHAR
         (
             "a,b,c\n4,5,6\n\\",
-            dict(escapechar="\\"),
+            {"escapechar": "\\"},
             None,
             "(EOF following escape character)|(unexpected end of data)",
         ),
         # ESCAPE_IN_QUOTED_FIELD
         (
             'a,b,c\n4,5,6\n"\\',
-            dict(escapechar="\\"),
+            {"escapechar": "\\"},
             None,
             "(EOF inside string starting at row 2)|(unexpected end of data)",
         ),
         # IN_QUOTED_FIELD
         (
             'a,b,c\n4,5,6\n"',
-            dict(escapechar="\\"),
+            {"escapechar": "\\"},
             None,
             "(EOF inside string starting at row 2)|(unexpected end of data)",
         ),
@@ -1502,16 +1502,16 @@ def test_uneven_lines_with_usecols(all_parsers, usecols):
     [
         # First, check to see that the response of parser when faced with no
         # provided columns raises the correct error, with or without usecols.
-        ("", dict(), None),
-        ("", dict(usecols=["X"]), None),
+        ("", {}, None),
+        ("", {"usecols": ["X"]}, None),
         (
             ",,",
-            dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
+            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
             DataFrame(columns=["X"], index=[0], dtype=np.float64),
         ),
         (
             "",
-            dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
+            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
             DataFrame(columns=["X"]),
         ),
     ],
@@ -1535,19 +1535,21 @@ def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
         # gh-8661, gh-8679: this should ignore six lines, including
         # lines with trailing whitespace and blank lines.
         (
-            dict(
-                header=None,
-                delim_whitespace=True,
-                skiprows=[0, 1, 2, 3, 5, 6],
-                skip_blank_lines=True,
-            ),
+            {
+                "header": None,
+                "delim_whitespace": True,
+                "skiprows": [0, 1, 2, 3, 5, 6],
+                "skip_blank_lines": True,
+            },
             DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
         ),
         # gh-8983: test skipping set of rows after a row with trailing spaces.
         (
-            dict(
-                delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True
-            ),
+            {
+                "delim_whitespace": True,
+                "skiprows": [1, 2, 3, 5, 6],
+                "skip_blank_lines": True,
+            },
             DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
         ),
     ],
@@ -1717,7 +1719,7 @@ def test_verbose_read2(all_parsers, capsys):

 def test_iteration_open_handle(all_parsers):
     parser = all_parsers
-    kwargs = dict(squeeze=True, header=None)
+    kwargs = {"squeeze": True, "header": None}

     with tm.ensure_clean() as path:
         with open(path, "w") as f:
@@ -1985,10 +1987,10 @@ def seek(self, pos, whence=0):

 @pytest.mark.parametrize(
     "kwargs",
-    [dict(), dict(error_bad_lines=True)],  # Default is True.  # Explicitly pass in.
+    [{}, {"error_bad_lines": True}],  # Default is True.  # Explicitly pass in.
 )
 @pytest.mark.parametrize(
-    "warn_kwargs", [dict(), dict(warn_bad_lines=True), dict(warn_bad_lines=False)]
+    "warn_kwargs", [{}, {"warn_bad_lines": True}, {"warn_bad_lines": False}]
 )
 def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
     # see gh-15925