
Commit f230020

psrok1 authored and Roconda committed
Fix: slow multipart parsing for huge files with few CR/LF characters
1 parent 26f3e95 commit f230020

1 file changed: 9 additions, 1 deletion

src/werkzeug/sansio/multipart.py

Lines changed: 9 additions & 1 deletion
@@ -251,12 +251,20 @@ def _parse_data(self, data: bytes, *, start: bool) -> tuple[bytes, int, bool]:
         else:
             data_start = 0
 
-        if self.buffer.find(b"--" + self.boundary) == -1:
+        boundary = b"--" + self.boundary
+
+        if self.buffer.find(boundary) == -1:
             # No complete boundary in the buffer, but there may be
             # a partial boundary at the end. As the boundary
             # starts with either a nl or cr find the earliest and
             # return up to that as data.
             data_end = del_index = self.last_newline(data[data_start:]) + data_start
+            # If the amount of data after the last newline is longer
+            # than the longest possible partial boundary, we can assume
+            # there is no partial boundary in the buffer and return
+            # all pending data.
+            if (len(data) - data_end) > len(b"\n" + boundary):
+                data_end = del_index = len(data)
             more_data = True
         else:
             match = self.boundary_re.search(data)
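
The added check short-circuits the slow path: when a chunk contains almost no CR/LF bytes, cutting at the last newline used to keep nearly the whole buffer around, and that buffer was rescanned on every call. Below is a minimal, self-contained sketch of the cut-point decision. It is illustrative only: safe_cut and pending are made-up names, not part of the werkzeug API, and the newline search only approximates the last_newline() helper used in the real code.

# A sketch (not werkzeug's API) of the cut-point logic, assuming `pending`
# is the buffered body bytes and `boundary` is b"--" + the multipart boundary.
def safe_cut(pending: bytes, boundary: bytes) -> int:
    """Return how many bytes of `pending` can be emitted as body data now."""
    # A boundary line always starts after a CR or LF, so everything up to
    # the earliest of the last CR / last LF is definitely plain data.
    last_nl = pending.rfind(b"\n")
    last_cr = pending.rfind(b"\r")
    cut = min(
        last_nl if last_nl != -1 else len(pending),
        last_cr if last_cr != -1 else len(pending),
    )

    # The new check: if more bytes follow that newline than a partial
    # boundary (b"\n" + boundary) could possibly occupy, no boundary can
    # be hiding at the end, so the whole chunk is safe to emit.
    if len(pending) - cut > len(b"\n" + boundary):
        cut = len(pending)

    return cut


boundary = b"--frontier"                       # illustrative boundary
chunk = b"first line\r\n" + b"A" * 10_000_000  # huge body, almost no CR/LF

# Without the extra check the cut point stays at the last newline (index 10
# here), so ~10 MB would remain buffered and be rescanned on the next call;
# with it, the whole chunk is emitted at once.
print(safe_cut(chunk, boundary))               # == len(chunk)

The key observation is that a partial boundary at the end of the buffer can never be longer than b"\n" + boundary, so a longer tail after the last newline cannot hide one; keeping only that bounded tail keeps the buffer small and the parsing roughly linear in the input size.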
