Skip to content

Commit 27e0330

Browse files
authored
ENH: 'encoding_errors' argument for read_csv/json (#39777)
1 parent 3570151 commit 27e0330

File tree

13 files changed

+162
-54
lines changed

13 files changed

+162
-54
lines changed

doc/source/whatsnew/v1.3.0.rst

+1
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@ Other enhancements
142142
- Add support for unary operators in :class:`FloatingArray` (:issue:`38749`)
143143
- :class:`RangeIndex` can now be constructed by passing a ``range`` object directly e.g. ``pd.RangeIndex(range(3))`` (:issue:`12067`)
144144
- :meth:`round` being enabled for the nullable integer and floating dtypes (:issue:`38844`)
145+
- :meth:`pandas.read_csv` and :meth:`pandas.read_json` expose the argument ``encoding_errors`` to control how encoding errors are handled (:issue:`39450`)
145146

146147
.. ---------------------------------------------------------------------------
147148

pandas/_libs/parsers.pyx

+28-25
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,19 @@ from libc.string cimport (
2020
import cython
2121
from cython import Py_ssize_t
2222

23-
from cpython.bytes cimport PyBytes_AsString
23+
from cpython.bytes cimport (
24+
PyBytes_AsString,
25+
PyBytes_FromString,
26+
)
2427
from cpython.exc cimport (
2528
PyErr_Fetch,
2629
PyErr_Occurred,
2730
)
2831
from cpython.object cimport PyObject
29-
from cpython.ref cimport Py_XDECREF
32+
from cpython.ref cimport (
33+
Py_INCREF,
34+
Py_XDECREF,
35+
)
3036
from cpython.unicode cimport (
3137
PyUnicode_AsUTF8String,
3238
PyUnicode_Decode,
@@ -143,7 +149,7 @@ cdef extern from "parser/tokenizer.h":
143149
enum: ERROR_OVERFLOW
144150

145151
ctypedef void* (*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
146-
int *status)
152+
int *status, const char *encoding_errors)
147153
ctypedef int (*io_cleanup)(void *src)
148154

149155
ctypedef struct parser_t:
@@ -255,8 +261,8 @@ cdef extern from "parser/tokenizer.h":
255261

256262
int parser_trim_buffers(parser_t *self)
257263

258-
int tokenize_all_rows(parser_t *self) nogil
259-
int tokenize_nrows(parser_t *self, size_t nrows) nogil
264+
int tokenize_all_rows(parser_t *self, const char *encoding_errors) nogil
265+
int tokenize_nrows(parser_t *self, size_t nrows, const char *encoding_errors) nogil
260266

261267
int64_t str_to_int64(char *p_item, int64_t int_min,
262268
int64_t int_max, int *error, char tsep) nogil
@@ -293,7 +299,7 @@ cdef extern from "parser/io.h":
293299
size_t *bytes_read, int *status)
294300

295301
void* buffer_rd_bytes(void *source, size_t nbytes,
296-
size_t *bytes_read, int *status)
302+
size_t *bytes_read, int *status, const char *encoding_errors)
297303

298304

299305
cdef class TextReader:
@@ -316,6 +322,7 @@ cdef class TextReader:
316322
uint64_t parser_start
317323
list clocks
318324
char *c_encoding
325+
const char *encoding_errors
319326
kh_str_starts_t *false_set
320327
kh_str_starts_t *true_set
321328

@@ -370,10 +377,15 @@ cdef class TextReader:
370377
bint verbose=False,
371378
bint mangle_dupe_cols=True,
372379
float_precision=None,
373-
bint skip_blank_lines=True):
380+
bint skip_blank_lines=True,
381+
encoding_errors=b"strict"):
374382

375383
# set encoding for native Python and C library
376384
self.c_encoding = NULL
385+
if isinstance(encoding_errors, str):
386+
encoding_errors = encoding_errors.encode("utf-8")
387+
Py_INCREF(encoding_errors)
388+
self.encoding_errors = PyBytes_AsString(encoding_errors)
377389

378390
self.parser = parser_new()
379391
self.parser.chunksize = tokenize_chunksize
@@ -558,13 +570,7 @@ cdef class TextReader:
558570
pass
559571

560572
def __dealloc__(self):
561-
parser_free(self.parser)
562-
if self.true_set:
563-
kh_destroy_str_starts(self.true_set)
564-
self.true_set = NULL
565-
if self.false_set:
566-
kh_destroy_str_starts(self.false_set)
567-
self.false_set = NULL
573+
self.close()
568574
parser_del(self.parser)
569575

570576
def close(self):
@@ -632,7 +638,6 @@ cdef class TextReader:
632638
char *word
633639
object name, old_name
634640
uint64_t hr, data_line = 0
635-
char *errors = "strict"
636641
StringPath path = _string_path(self.c_encoding)
637642
list header = []
638643
set unnamed_cols = set()
@@ -673,11 +678,8 @@ cdef class TextReader:
673678
for i in range(field_count):
674679
word = self.parser.words[start + i]
675680

676-
if path == UTF8:
677-
name = PyUnicode_FromString(word)
678-
elif path == ENCODED:
679-
name = PyUnicode_Decode(word, strlen(word),
680-
self.c_encoding, errors)
681+
name = PyUnicode_Decode(word, strlen(word),
682+
self.c_encoding, self.encoding_errors)
681683

682684
# We use this later when collecting placeholder names.
683685
old_name = name
@@ -831,7 +833,7 @@ cdef class TextReader:
831833
int status
832834

833835
with nogil:
834-
status = tokenize_nrows(self.parser, nrows)
836+
status = tokenize_nrows(self.parser, nrows, self.encoding_errors)
835837

836838
if self.parser.warn_msg != NULL:
837839
print(self.parser.warn_msg, file=sys.stderr)
@@ -859,7 +861,7 @@ cdef class TextReader:
859861
'the whole file')
860862
else:
861863
with nogil:
862-
status = tokenize_all_rows(self.parser)
864+
status = tokenize_all_rows(self.parser, self.encoding_errors)
863865

864866
if self.parser.warn_msg != NULL:
865867
print(self.parser.warn_msg, file=sys.stderr)
@@ -1201,7 +1203,7 @@ cdef class TextReader:
12011203

12021204
if path == UTF8:
12031205
return _string_box_utf8(self.parser, i, start, end, na_filter,
1204-
na_hashset)
1206+
na_hashset, self.encoding_errors)
12051207
elif path == ENCODED:
12061208
return _string_box_decode(self.parser, i, start, end,
12071209
na_filter, na_hashset, self.c_encoding)
@@ -1352,7 +1354,8 @@ cdef inline StringPath _string_path(char *encoding):
13521354

13531355
cdef _string_box_utf8(parser_t *parser, int64_t col,
13541356
int64_t line_start, int64_t line_end,
1355-
bint na_filter, kh_str_starts_t *na_hashset):
1357+
bint na_filter, kh_str_starts_t *na_hashset,
1358+
const char *encoding_errors):
13561359
cdef:
13571360
int error, na_count = 0
13581361
Py_ssize_t i, lines
@@ -1391,7 +1394,7 @@ cdef _string_box_utf8(parser_t *parser, int64_t col,
13911394
pyval = <object>table.vals[k]
13921395
else:
13931396
# box it. new ref?
1394-
pyval = PyUnicode_FromString(word)
1397+
pyval = PyUnicode_Decode(word, strlen(word), "utf-8", encoding_errors)
13951398

13961399
k = kh_put_strbox(table, word, &ret)
13971400
table.vals[k] = <PyObject *>pyval

pandas/_libs/src/parser/io.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ void *buffer_file_bytes(void *source, size_t nbytes, size_t *bytes_read,
163163
}
164164

165165
void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
166-
int *status) {
166+
int *status, const char *encoding_errors) {
167167
PyGILState_STATE state;
168168
PyObject *result, *func, *args, *tmp;
169169

@@ -191,7 +191,7 @@ void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
191191
*status = CALLING_READ_FAILED;
192192
return NULL;
193193
} else if (!PyBytes_Check(result)) {
194-
tmp = PyUnicode_AsUTF8String(result);
194+
tmp = PyUnicode_AsEncodedString(result, "utf-8", encoding_errors);
195195
Py_DECREF(result);
196196
if (tmp == NULL) {
197197
PyGILState_Release(state);

pandas/_libs/src/parser/io.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,6 @@ void *buffer_file_bytes(void *source, size_t nbytes, size_t *bytes_read,
6464
int *status);
6565

6666
void *buffer_rd_bytes(void *source, size_t nbytes, size_t *bytes_read,
67-
int *status);
67+
int *status, const char *encoding_errors);
6868

6969
#endif // PANDAS__LIBS_SRC_PARSER_IO_H_

pandas/_libs/src/parser/tokenizer.c

+12-8
Original file line numberDiff line numberDiff line change
@@ -553,13 +553,15 @@ int parser_set_skipfirstnrows(parser_t *self, int64_t nrows) {
553553
return 0;
554554
}
555555

556-
static int parser_buffer_bytes(parser_t *self, size_t nbytes) {
556+
static int parser_buffer_bytes(parser_t *self, size_t nbytes,
557+
const char *encoding_errors) {
557558
int status;
558559
size_t bytes_read;
559560

560561
status = 0;
561562
self->datapos = 0;
562-
self->data = self->cb_io(self->source, nbytes, &bytes_read, &status);
563+
self->data = self->cb_io(self->source, nbytes, &bytes_read, &status,
564+
encoding_errors);
563565
TRACE((
564566
"parser_buffer_bytes self->cb_io: nbytes=%zu, datalen: %d, status=%d\n",
565567
nbytes, bytes_read, status));
@@ -1334,7 +1336,8 @@ int parser_trim_buffers(parser_t *self) {
13341336
all : tokenize all the data vs. certain number of rows
13351337
*/
13361338

1337-
int _tokenize_helper(parser_t *self, size_t nrows, int all) {
1339+
int _tokenize_helper(parser_t *self, size_t nrows, int all,
1340+
const char *encoding_errors) {
13381341
int status = 0;
13391342
uint64_t start_lines = self->lines;
13401343

@@ -1350,7 +1353,8 @@ int _tokenize_helper(parser_t *self, size_t nrows, int all) {
13501353
if (!all && self->lines - start_lines >= nrows) break;
13511354

13521355
if (self->datapos == self->datalen) {
1353-
status = parser_buffer_bytes(self, self->chunksize);
1356+
status = parser_buffer_bytes(self, self->chunksize,
1357+
encoding_errors);
13541358

13551359
if (status == REACHED_EOF) {
13561360
// close out last line
@@ -1383,13 +1387,13 @@ int _tokenize_helper(parser_t *self, size_t nrows, int all) {
13831387
return status;
13841388
}
13851389

1386-
int tokenize_nrows(parser_t *self, size_t nrows) {
1387-
int status = _tokenize_helper(self, nrows, 0);
1390+
int tokenize_nrows(parser_t *self, size_t nrows, const char *encoding_errors) {
1391+
int status = _tokenize_helper(self, nrows, 0, encoding_errors);
13881392
return status;
13891393
}
13901394

1391-
int tokenize_all_rows(parser_t *self) {
1392-
int status = _tokenize_helper(self, -1, 1);
1395+
int tokenize_all_rows(parser_t *self, const char *encoding_errors) {
1396+
int status = _tokenize_helper(self, -1, 1, encoding_errors);
13931397
return status;
13941398
}
13951399

pandas/_libs/src/parser/tokenizer.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ typedef enum {
8585
} QuoteStyle;
8686

8787
typedef void *(*io_callback)(void *src, size_t nbytes, size_t *bytes_read,
88-
int *status);
88+
int *status, const char *encoding_errors);
8989
typedef int (*io_cleanup)(void *src);
9090

9191
typedef struct parser_t {
@@ -196,9 +196,9 @@ void parser_del(parser_t *self);
196196

197197
void parser_set_default_options(parser_t *self);
198198

199-
int tokenize_nrows(parser_t *self, size_t nrows);
199+
int tokenize_nrows(parser_t *self, size_t nrows, const char *encoding_errors);
200200

201-
int tokenize_all_rows(parser_t *self);
201+
int tokenize_all_rows(parser_t *self, const char *encoding_errors);
202202

203203
// Have parsed / type-converted a chunk of data
204204
// and want to free memory from the token stream

pandas/io/common.py

+21-4
Original file line numberDiff line numberDiff line change
@@ -583,12 +583,32 @@ def get_handle(
583583
Returns the dataclass IOHandles
584584
"""
585585
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
586-
encoding_passed, encoding = encoding, encoding or "utf-8"
586+
encoding = encoding or "utf-8"
587587

588588
# read_csv does not know whether the buffer is opened in binary/text mode
589589
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
590590
mode += "b"
591591

592+
# validate errors
593+
if isinstance(errors, str):
594+
errors = errors.lower()
595+
if errors not in (
596+
None,
597+
"strict",
598+
"ignore",
599+
"replace",
600+
"xmlcharrefreplace",
601+
"backslashreplace",
602+
"namereplace",
603+
"surrogateescape",
604+
"surrogatepass",
605+
):
606+
raise ValueError(
607+
f"Invalid value for `encoding_errors` ({errors}). Please see "
608+
+ "https://docs.python.org/3/library/codecs.html#error-handlers "
609+
+ "for valid values."
610+
)
611+
592612
# open URLs
593613
ioargs = _get_filepath_or_buffer(
594614
path_or_buf,
@@ -677,9 +697,6 @@ def get_handle(
677697
# Check whether the filename is to be opened in binary mode.
678698
# Binary mode does not support 'encoding' and 'newline'.
679699
if ioargs.encoding and "b" not in ioargs.mode:
680-
if errors is None and encoding_passed is None:
681-
# ignore errors when no encoding is specified
682-
errors = "replace"
683700
# Encoding
684701
handle = open(
685702
handle,

pandas/io/json/_json.py

+13-2
Original file line numberDiff line numberDiff line change
@@ -334,6 +334,7 @@ def read_json(
334334
precise_float: bool = False,
335335
date_unit=None,
336336
encoding=None,
337+
encoding_errors: Optional[str] = "strict",
337338
lines: bool = False,
338339
chunksize: Optional[int] = None,
339340
compression: CompressionOptions = "infer",
@@ -456,6 +457,12 @@ def read_json(
456457
encoding : str, default is 'utf-8'
457458
The encoding to use to decode py3 bytes.
458459
460+
encoding_errors : str, optional, default "strict"
461+
How encoding errors are treated. `List of possible values
462+
<https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
463+
464+
.. versionadded:: 1.3
465+
459466
lines : bool, default False
460467
Read the file as a json object per line.
461468
@@ -584,6 +591,7 @@ def read_json(
584591
compression=compression,
585592
nrows=nrows,
586593
storage_options=storage_options,
594+
encoding_errors=encoding_errors,
587595
)
588596

589597
if chunksize:
@@ -620,6 +628,7 @@ def __init__(
620628
compression: CompressionOptions,
621629
nrows: Optional[int],
622630
storage_options: StorageOptions = None,
631+
encoding_errors: Optional[str] = "strict",
623632
):
624633

625634
self.orient = orient
@@ -638,6 +647,7 @@ def __init__(
638647
self.chunksize = chunksize
639648
self.nrows_seen = 0
640649
self.nrows = nrows
650+
self.encoding_errors = encoding_errors
641651
self.handles: Optional[IOHandles] = None
642652

643653
if self.chunksize is not None:
@@ -661,8 +671,8 @@ def _preprocess_data(self, data):
661671
Otherwise, we read it into memory for the `read` method.
662672
"""
663673
if hasattr(data, "read") and not (self.chunksize or self.nrows):
664-
data = data.read()
665-
self.close()
674+
with self:
675+
data = data.read()
666676
if not hasattr(data, "read") and (self.chunksize or self.nrows):
667677
data = StringIO(data)
668678

@@ -692,6 +702,7 @@ def _get_data_from_filepath(self, filepath_or_buffer):
692702
encoding=self.encoding,
693703
compression=self.compression,
694704
storage_options=self.storage_options,
705+
errors=self.encoding_errors,
695706
)
696707
filepath_or_buffer = self.handles.handle
697708

pandas/io/parsers/base_parser.py

+2
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@
109109
"mangle_dupe_cols": True,
110110
"infer_datetime_format": False,
111111
"skip_blank_lines": True,
112+
"encoding_errors": "strict",
112113
}
113114

114115

@@ -212,6 +213,7 @@ def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
212213
compression=kwds.get("compression", None),
213214
memory_map=kwds.get("memory_map", False),
214215
storage_options=kwds.get("storage_options", None),
216+
errors=kwds.get("encoding_errors", "strict"),
215217
)
216218

217219
def _validate_parse_dates_presence(self, columns: List[str]) -> None:

0 commit comments

Comments
 (0)