From 822230d411fed5d92706be9413440dea093e2905 Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Mon, 20 May 2024 12:35:59 +0300 Subject: [PATCH 01/19] gh-115952: Fix vulnerability in the pickle module Loading a small data which does not even involve arbitrary code execution could consume arbitrary large amount of memory. There were three issues: * PUT and LONG_BINPUT with large argument (the C implementation only). Since the memo is implemented in C as a continuous dynamic array, a single opcode can cause its resizing to arbitrary size. Now the sparsity of memo indices is limited. * BINBYTES, BINBYTES8 and BYTEARRAY8 with large argument. They allocated the bytes or bytearray object of the specified size before reading into it. Now they read very large data by chunks. * BINSTRING, BINUNICODE, LONG4, BINUNICODE8 and FRAME with large argument. They read the whole data by calling the read() method of the underlying file object, which usually allocates the bytes object of the specified size before reading into it. Now they read very large data by chunks. --- Lib/pickle.py | 34 ++- Lib/test/pickletester.py | 102 ++++++++- Lib/test/test_pickle.py | 38 ++++ ...-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst | 3 + Modules/_pickle.c | 209 ++++++++++++------ 5 files changed, 304 insertions(+), 82 deletions(-) create mode 100644 Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst diff --git a/Lib/pickle.py b/Lib/pickle.py index 33c97c8c5efb28..2e190a0e02124e 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -191,6 +191,11 @@ def __init__(self, value): __all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)]) +# Data larger that this will be read in chunks, to prevent extreme +# overallocation. +SAFE_BUF_SIZE = (1 << 20) + + class _Framer: _FRAME_SIZE_MIN = 4 @@ -289,7 +294,7 @@ def read(self, n): "pickle exhausted before end of frame") return data else: - return self.file_read(n) + return self.safe_file_read(n) def readline(self): if self.current_frame: @@ -304,11 +309,23 @@ def readline(self): else: return self.file_readline() + def safe_file_read(self, size): + cursize = min(size, SAFE_BUF_SIZE) + b = self.file_read(cursize) + while cursize < size and len(b) == cursize: + delta = min(cursize, size - cursize) + b += self.file_read(delta) + cursize += delta + return b + def load_frame(self, frame_size): if self.current_frame and self.current_frame.read() != b'': raise UnpicklingError( "beginning of a new frame before end of current frame") - self.current_frame = io.BytesIO(self.file_read(frame_size)) + data = self.safe_file_read(frame_size) + if len(data) < frame_size: + raise EOFError + self.current_frame = io.BytesIO(data) # Tools used for pickling. 
@@ -1378,12 +1395,17 @@ def load_binbytes8(self): dispatch[BINBYTES8[0]] = load_binbytes8 def load_bytearray8(self): - len, = unpack(' maxsize: + size, = unpack(' maxsize: raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size " "of %d bytes" % maxsize) - b = bytearray(len) - self.readinto(b) + cursize = min(size, SAFE_BUF_SIZE) + b = bytearray(cursize) + if self.readinto(b) == cursize: + while cursize < size and len(b) == cursize: + delta = min(cursize, size - cursize) + b += self.read(delta) + cursize += delta self.append(b) dispatch[BYTEARRAY8[0]] = load_bytearray8 diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 93e7dbbd103934..769fd99fb1381c 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -853,9 +853,8 @@ def assert_is_copy(self, obj, objcopy, msg=None): self.assertEqual(getattr(obj, slot, None), getattr(objcopy, slot, None), msg=msg) - def check_unpickling_error(self, errors, data): - with self.subTest(data=data), \ - self.assertRaises(errors): + def check_unpickling_error_strict(self, errors, data): + with self.assertRaises(errors): try: self.loads(data) except BaseException as exc: @@ -864,6 +863,10 @@ def check_unpickling_error(self, errors, data): (data, exc.__class__.__name__, exc)) raise + def check_unpickling_error(self, errors, data): + with self.subTest(data=data): + self.check_unpickling_error_strict(errors, data) + def test_load_from_data0(self): self.assert_is_copy(self._testdata, self.loads(DATA0)) @@ -1115,6 +1118,99 @@ def test_negative_32b_binput(self): dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.' self.check_unpickling_error(ValueError, dumped) + def itersize(self, start, stop): + size = start + while size < stop: + yield size + size <<= 1 + + def _test_truncated_data(self, dumped, expected_error=None): + if expected_error is None: + expected_error = self.truncated_data_error + # BytesIO + with self.assertRaisesRegex(*expected_error): + self.loads(dumped) + if hasattr(self, 'unpickler'): + try: + with open(TESTFN, 'wb') as f: + f.write(dumped) + # buffered file + with open(TESTFN, 'rb') as f: + u = self.unpickler(f) + with self.assertRaisesRegex(*expected_error): + u.load() + # unbuffered file + with open(TESTFN, 'rb', buffering=0) as f: + u = self.unpickler(f) + with self.assertRaisesRegex(*expected_error): + u.load() + finally: + os_helper.unlink(TESTFN) + + def test_truncated_large_binstring(self): + data = lambda size: b'T' + struct.pack(' sys.maxsize')) + + def test_truncated_large_binunicode8(self): + data = lambda size: b'\x8d' + struct.pack('readinto) { + /* readinto() not supported on file-like object, fall back to read() + * and copy into destination buffer (bpo-39681) */ + PyObject* len = PyLong_FromSsize_t(n); + if (len == NULL) { + return -1; + } + PyObject* data = _Pickle_FastCall(self->read, len); + if (data == NULL) { + return -1; + } + if (!PyBytes_Check(data)) { + PyErr_Format(PyExc_ValueError, + "read() returned non-bytes object (%R)", + Py_TYPE(data)); + Py_DECREF(data); + return -1; + } + Py_ssize_t read_size = PyBytes_GET_SIZE(data); + if (read_size < n) { + Py_DECREF(data); + return bad_readline(state); + } + memcpy(buf, PyBytes_AS_STRING(data), n); + Py_DECREF(data); + return n; + } + + /* Call readinto() into user buffer */ + PyObject *buf_obj = PyMemoryView_FromMemory(buf, n, PyBUF_WRITE); + if (buf_obj == NULL) { + return -1; + } + PyObject *read_size_obj = _Pickle_FastCall(self->readinto, buf_obj); + if (read_size_obj == NULL) { + return -1; + } + Py_ssize_t read_size = 
PyLong_AsSsize_t(read_size_obj); + Py_DECREF(read_size_obj); + + if (read_size < 0) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ValueError, + "readinto() returned negative size"); + } + return -1; + } + if (read_size < n) { + return bad_readline(state); + } + return n; +} + /* If reading from a file, we need to only pull the bytes we need, since there may be multiple pickle objects arranged contiguously in the same input buffer. @@ -1256,7 +1319,7 @@ static const Py_ssize_t READ_WHOLE_LINE = -1; causing the Unpickler to go back to the file for more data. Use the returned size to tell you how much data you can process. */ static Py_ssize_t -_Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n) +_Unpickler_ReadFromFile(PickleState *state, UnpicklerObject *self, Py_ssize_t n) { PyObject *data; Py_ssize_t read_size; @@ -1268,6 +1331,9 @@ _Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n) if (n == READ_WHOLE_LINE) { data = PyObject_CallNoArgs(self->readline); + if (data == NULL) { + return -1; + } } else { PyObject *len; @@ -1292,13 +1358,28 @@ _Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n) return n; } } - len = PyLong_FromSsize_t(n); + Py_ssize_t cursize = Py_MIN(n, SAFE_BUF_SIZE); + len = PyLong_FromSsize_t(cursize); if (len == NULL) return -1; data = _Pickle_FastCall(self->read, len); + if (data == NULL) { + return -1; + } + while (cursize < n) { + Py_ssize_t prevsize = cursize; + cursize += Py_MIN(cursize, n - cursize); + if (_PyBytes_Resize(&data, cursize) < 0) { + return -1; + } + if (_Unpickler_ReadIntoFromFile(state, self, + PyBytes_AS_STRING(data) + prevsize, cursize - prevsize) < 0) + { + Py_DECREF(data); + return -1; + } + } } - if (data == NULL) - return -1; read_size = _Unpickler_SetStringInput(self, data); Py_DECREF(data); @@ -1325,7 +1406,7 @@ _Unpickler_ReadImpl(UnpicklerObject *self, PickleState *st, char **s, Py_ssize_t return bad_readline(st); /* Extend the buffer to satisfy desired size */ - num_read = _Unpickler_ReadFromFile(self, n); + num_read = _Unpickler_ReadFromFile(st, self, n); if (num_read < 0) return -1; if (num_read < n) @@ -1372,57 +1453,7 @@ _Unpickler_ReadInto(PickleState *state, UnpicklerObject *self, char *buf, return -1; } - if (!self->readinto) { - /* readinto() not supported on file-like object, fall back to read() - * and copy into destination buffer (bpo-39681) */ - PyObject* len = PyLong_FromSsize_t(n); - if (len == NULL) { - return -1; - } - PyObject* data = _Pickle_FastCall(self->read, len); - if (data == NULL) { - return -1; - } - if (!PyBytes_Check(data)) { - PyErr_Format(PyExc_ValueError, - "read() returned non-bytes object (%R)", - Py_TYPE(data)); - Py_DECREF(data); - return -1; - } - Py_ssize_t read_size = PyBytes_GET_SIZE(data); - if (read_size < n) { - Py_DECREF(data); - return bad_readline(state); - } - memcpy(buf, PyBytes_AS_STRING(data), n); - Py_DECREF(data); - return n; - } - - /* Call readinto() into user buffer */ - PyObject *buf_obj = PyMemoryView_FromMemory(buf, n, PyBUF_WRITE); - if (buf_obj == NULL) { - return -1; - } - PyObject *read_size_obj = _Pickle_FastCall(self->readinto, buf_obj); - if (read_size_obj == NULL) { - return -1; - } - Py_ssize_t read_size = PyLong_AsSsize_t(read_size_obj); - Py_DECREF(read_size_obj); - - if (read_size < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "readinto() returned negative size"); - } - return -1; - } - if (read_size < n) { - return bad_readline(state); - } - return n; + return _Unpickler_ReadIntoFromFile(state, self, buf, n); } 
/* Read `n` bytes from the unpickler's data source, storing the result in `*s`. @@ -1482,7 +1513,7 @@ _Unpickler_Readline(PickleState *state, UnpicklerObject *self, char **result) if (!self->read) return bad_readline(state); - num_read = _Unpickler_ReadFromFile(self, READ_WHOLE_LINE); + num_read = _Unpickler_ReadFromFile(state, self, READ_WHOLE_LINE); if (num_read < 0) return -1; if (num_read == 0 || self->input_buffer[num_read - 1] != '\n') @@ -1526,11 +1557,15 @@ _Unpickler_MemoGet(UnpicklerObject *self, size_t idx) /* Returns -1 (with an exception set) on failure, 0 on success. This takes its own reference to `value`. */ static int -_Unpickler_MemoPut(UnpicklerObject *self, size_t idx, PyObject *value) +_Unpickler_MemoPut(PickleState *st, UnpicklerObject *self, size_t idx, PyObject *value) { PyObject *old_item; if (idx >= self->memo_size) { + if (idx > self->memo_len * 2 + (1 << 17)) { + PyErr_SetString(st->UnpicklingError, "too sparse memo indices"); + return -1; + } if (_Unpickler_ResizeMemoList(self, idx * 2) < 0) return -1; assert(idx < self->memo_size); @@ -5451,13 +5486,28 @@ load_counted_binbytes(PickleState *state, UnpicklerObject *self, int nbytes) return -1; } - bytes = PyBytes_FromStringAndSize(NULL, size); - if (bytes == NULL) - return -1; - if (_Unpickler_ReadInto(state, self, PyBytes_AS_STRING(bytes), size) < 0) { - Py_DECREF(bytes); + Py_ssize_t cursize = Py_MIN(size, SAFE_BUF_SIZE); + Py_ssize_t prevsize = 0; + bytes = PyBytes_FromStringAndSize(NULL, cursize); + if (bytes == NULL) { return -1; } + while (1) { + if (_Unpickler_ReadInto(state, self, + PyBytes_AS_STRING(bytes) + prevsize, cursize - prevsize) < 0) + { + Py_DECREF(bytes); + return -1; + } + if (cursize >= size) { + break; + } + prevsize = cursize; + cursize += Py_MIN(cursize, size - cursize); + if (_PyBytes_Resize(&bytes, cursize) < 0) { + return -1; + } + } PDATA_PUSH(self->stack, bytes, -1); return 0; @@ -5482,14 +5532,27 @@ load_counted_bytearray(PickleState *state, UnpicklerObject *self) return -1; } - bytearray = PyByteArray_FromStringAndSize(NULL, size); + Py_ssize_t cursize = Py_MIN(size, SAFE_BUF_SIZE); + Py_ssize_t prevsize = 0; + bytearray = PyByteArray_FromStringAndSize(NULL, cursize); if (bytearray == NULL) { return -1; } - char *str = PyByteArray_AS_STRING(bytearray); - if (_Unpickler_ReadInto(state, self, str, size) < 0) { - Py_DECREF(bytearray); - return -1; + while (1) { + if (_Unpickler_ReadInto(state, self, + PyByteArray_AS_STRING(bytearray) + prevsize, + cursize - prevsize) < 0) { + Py_DECREF(bytearray); + return -1; + } + if (cursize >= size) { + break; + } + prevsize = cursize; + cursize += Py_MIN(cursize, size - cursize); + if (PyByteArray_Resize(bytearray, cursize) < 0) { + return -1; + } } PDATA_PUSH(self->stack, bytearray, -1); @@ -6279,7 +6342,7 @@ load_put(PickleState *state, UnpicklerObject *self) return -1; } - return _Unpickler_MemoPut(self, idx, value); + return _Unpickler_MemoPut(state, self, idx, value); } static int @@ -6298,7 +6361,7 @@ load_binput(PickleState *state, UnpicklerObject *self) idx = Py_CHARMASK(s[0]); - return _Unpickler_MemoPut(self, idx, value); + return _Unpickler_MemoPut(state, self, idx, value); } static int @@ -6322,7 +6385,7 @@ load_long_binput(PickleState *state, UnpicklerObject *self) return -1; } - return _Unpickler_MemoPut(self, idx, value); + return _Unpickler_MemoPut(state, self, idx, value); } static int @@ -6334,7 +6397,7 @@ load_memoize(PickleState *state, UnpicklerObject *self) return Pdata_stack_underflow(state, self->stack); value = 
self->stack->data[Py_SIZE(self->stack) - 1]; - return _Unpickler_MemoPut(self, self->memo_len, value); + return _Unpickler_MemoPut(state, self, self->memo_len, value); } static int @@ -7432,7 +7495,7 @@ Unpickler_set_memo(UnpicklerObject *self, PyObject *obj, void *Py_UNUSED(ignored "memo key must be positive integers."); goto error; } - if (_Unpickler_MemoPut(self, idx, value) < 0) + if (_Unpickler_MemoPut(state, self, idx, value) < 0) goto error; } } From 88f1461cfa9c3f921be1c04ddb6e09581d6375c0 Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Mon, 20 May 2024 14:01:54 +0300 Subject: [PATCH 02/19] Try to fix tests of 32-bit platforms. --- Lib/test/pickletester.py | 17 ++++++++--------- Lib/test/test_pickle.py | 2 +- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 769fd99fb1381c..66902599dd538e 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1123,6 +1123,7 @@ def itersize(self, start, stop): while size < stop: yield size size <<= 1 + yield stop def _test_truncated_data(self, dumped, expected_error=None): if expected_error is None: @@ -1150,29 +1151,27 @@ def _test_truncated_data(self, dumped, expected_error=None): def test_truncated_large_binstring(self): data = lambda size: b'T' + struct.pack(' Date: Mon, 20 May 2024 20:51:10 +0300 Subject: [PATCH 03/19] Try to fix more tests on 32-bit platforms. --- Lib/test/pickletester.py | 6 +++--- Lib/test/test_pickle.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 66902599dd538e..fff54a6f22f23e 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1151,7 +1151,7 @@ def _test_truncated_data(self, dumped, expected_error=None): def test_truncated_large_binstring(self): data = lambda size: b'T' + struct.pack(' Date: Wed, 22 May 2024 16:18:17 +0300 Subject: [PATCH 04/19] Apply suggestions from code review --- Lib/pickle.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Lib/pickle.py b/Lib/pickle.py index 2e190a0e02124e..9be5c2401c7a9e 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -191,7 +191,7 @@ def __init__(self, value): __all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)]) -# Data larger that this will be read in chunks, to prevent extreme +# Data larger than this will be read in chunks, to prevent extreme # overallocation. SAFE_BUF_SIZE = (1 << 20) From d0e667eb848cf28e027645b13546ec961b623f57 Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Sat, 29 Jun 2024 11:10:31 +0300 Subject: [PATCH 05/19] Remove empty lines. --- Lib/test/test_pickle.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py index 809bdab06260df..9deec6dc881761 100644 --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -62,7 +62,6 @@ def test_too_large_put(self): for idx in [10**6, 10**9, 10**12]: self.assertEqual(self.loads(data(idx)), ([],)*2) - def test_too_large_long_binput(self): data = lambda n: (b'(]r' + struct.pack(' Date: Sun, 30 Jun 2024 13:32:39 +0300 Subject: [PATCH 06/19] Change names, add more commentis and update the NEWS entry. 
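
For reference: after the rename, `_MIN_READ_BUF_SIZE` (Python) and
`MIN_READ_BUF_SIZE` (C) bound only the *first* read; each subsequent read
doubles the buffer until the requested size is reached, so reading `size`
bytes costs O(log2(size / 1 MiB)) extra read calls. A minimal standalone
sketch of the pattern (illustrative only; the real implementation is
`_chunked_file_read` in Lib/pickle.py):

    def chunked_read(file_read, size, bufsize=1 << 20):
        # Start with at most `bufsize` bytes and double the request on
        # each iteration, so a lying length prefix cannot force one huge
        # up-front allocation.
        cursize = min(size, bufsize)
        b = file_read(cursize)
        while cursize < size and len(b) == cursize:
            delta = min(cursize, size - cursize)
            b += file_read(delta)
            cursize += delta
        return b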
--- Lib/pickle.py | 12 ++++++------ Lib/test/pickletester.py | 2 ++ ...24-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst | 5 ++--- Modules/_pickle.c | 17 ++++++++++++----- 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/Lib/pickle.py b/Lib/pickle.py index e287e162925989..1e10459e58405e 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -193,7 +193,7 @@ def __init__(self, value): # Data larger than this will be read in chunks, to prevent extreme # overallocation. -SAFE_BUF_SIZE = (1 << 20) +_MIN_READ_BUF_SIZE = (1 << 20) class _Framer: @@ -294,7 +294,7 @@ def read(self, n): "pickle exhausted before end of frame") return data else: - return self.safe_file_read(n) + return self._chunked_file_read(n) def readline(self): if self.current_frame: @@ -309,8 +309,8 @@ def readline(self): else: return self.file_readline() - def safe_file_read(self, size): - cursize = min(size, SAFE_BUF_SIZE) + def _chunked_file_read(self, size): + cursize = min(size, _MIN_READ_BUF_SIZE) b = self.file_read(cursize) while cursize < size and len(b) == cursize: delta = min(cursize, size - cursize) @@ -322,7 +322,7 @@ def load_frame(self, frame_size): if self.current_frame and self.current_frame.read() != b'': raise UnpicklingError( "beginning of a new frame before end of current frame") - data = self.safe_file_read(frame_size) + data = self._chunked_file_read(frame_size) if len(data) < frame_size: raise EOFError self.current_frame = io.BytesIO(data) @@ -1419,7 +1419,7 @@ def load_bytearray8(self): if size > maxsize: raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size " "of %d bytes" % maxsize) - cursize = min(size, SAFE_BUF_SIZE) + cursize = min(size, _MIN_READ_BUF_SIZE) b = bytearray(cursize) if self.readinto(b) == cursize: while cursize < size and len(b) == cursize: diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 6184837b26afd8..2aeee4fef213b3 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1119,6 +1119,8 @@ def test_negative_32b_binput(self): self.check_unpickling_error(ValueError, dumped) def itersize(self, start, stop): + # Produce geometrical increasing sequence from start to stop + # (inclusively) for tests. size = start while size < stop: yield size diff --git a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst index 3d4de4be87ecf5..cbe0e53e8fabb0 100644 --- a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst +++ b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst @@ -1,3 +1,2 @@ -Fix a vulnerability in the :mod:`pickle` module which allowed to consume -arbitrary large amount of memory when loading a small data which does not -even involve arbitrary code execution. +:mod:`pickle` now avoids allocation of arbitrary large amount of memory +when unpickling specially crafted data. diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 4de33ab6cdd87c..51c91fb3e53e8b 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -155,7 +155,7 @@ enum { PREFETCH = 8192 * 16, /* Data larger that this will be read in chunks, to prevent extreme overallocation. 
*/ - SAFE_BUF_SIZE = 1 << 20, + MIN_READ_BUF_SIZE = 1 << 20, FRAME_SIZE_MIN = 4, FRAME_SIZE_TARGET = 64 * 1024, @@ -1358,7 +1358,7 @@ _Unpickler_ReadFromFile(PickleState *state, UnpicklerObject *self, Py_ssize_t n) return n; } } - Py_ssize_t cursize = Py_MIN(n, SAFE_BUF_SIZE); + Py_ssize_t cursize = Py_MIN(n, MIN_READ_BUF_SIZE); len = PyLong_FromSsize_t(cursize); if (len == NULL) return -1; @@ -1562,7 +1562,14 @@ _Unpickler_MemoPut(PickleState *st, UnpicklerObject *self, size_t idx, PyObject PyObject *old_item; if (idx >= self->memo_size) { - if (idx > self->memo_len * 2 + (1 << 17)) { + /* MAX_MEMO_INDICES_GAP was introduced mainly for making testing of + * PUT, BINPUT and LONG_BINPUT opcodes simpler. It should be more + * than 1<<16 for LONG_BINPUT. + * The standard pickler never produces data that requires more than 0. + * The Python code does not have such limitation. + */ + const int MAX_MEMO_INDICES_GAP = 1 << 17; + if (idx > self->memo_len * 2 + MAX_MEMO_INDICES_GAP) { PyErr_SetString(st->UnpicklingError, "too sparse memo indices"); return -1; } @@ -5485,7 +5492,7 @@ load_counted_binbytes(PickleState *state, UnpicklerObject *self, int nbytes) return -1; } - Py_ssize_t cursize = Py_MIN(size, SAFE_BUF_SIZE); + Py_ssize_t cursize = Py_MIN(size, MIN_READ_BUF_SIZE); Py_ssize_t prevsize = 0; bytes = PyBytes_FromStringAndSize(NULL, cursize); if (bytes == NULL) { @@ -5531,7 +5538,7 @@ load_counted_bytearray(PickleState *state, UnpicklerObject *self) return -1; } - Py_ssize_t cursize = Py_MIN(size, SAFE_BUF_SIZE); + Py_ssize_t cursize = Py_MIN(size, MIN_READ_BUF_SIZE); Py_ssize_t prevsize = 0; bytearray = PyByteArray_FromStringAndSize(NULL, cursize); if (bytearray == NULL) { From 184984db7e85c19fae6258a471e7cbe527069c2f Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Fri, 6 Sep 2024 10:59:12 +0300 Subject: [PATCH 07/19] Support arbitrary non-continuous memo keys. --- Lib/test/pickletester.py | 12 +++++ Lib/test/test_pickle.py | 32 +----------- Modules/_pickle.c | 110 ++++++++++++++++++++++++--------------- 3 files changed, 82 insertions(+), 72 deletions(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 548ec60bd34a66..b5b4da00049886 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1127,6 +1127,18 @@ def itersize(self, start, stop): size <<= 1 yield stop + def test_too_large_put(self): + data = lambda n: (b'((lp' + str(n).encode() + b'\n' + + b'g' + str(n).encode() + b'\nt.') + for idx in [10**6, 10**9, 10**12]: + self.assertEqual(self.loads(data(idx)), ([],)*2) + + def test_too_large_long_binput(self): + data = lambda n: (b'(]r' + struct.pack('= self->memo_size) - return NULL; - - return self->memo[idx]; + PyObject *value; + if (idx < self->memo_size) { + value = self->memo[idx]; + if (value != NULL) { + return value; + } + } + if (self->memo_dict != NULL) { + PyObject *key = PyLong_FromSsize_t(idx); + if (key == NULL) { + return NULL; + } + if (idx < self->memo_size) { + (void)PyDict_Pop(self->memo_dict, key, &value); + self->memo[idx] = value; + } + else { + value = PyDict_GetItemWithError(self->memo_dict, key); + } + Py_DECREF(key); + if (value != NULL || PyErr_Occurred()) { + return value; + } + } + PyErr_Format(st->UnpicklingError, "Memo value not found at index %zd", idx); + return NULL; } /* Returns -1 (with an exception set) on failure, 0 on success. This takes its own reference to `value`. 
*/ static int -_Unpickler_MemoPut(PickleState *st, UnpicklerObject *self, size_t idx, PyObject *value) +_Unpickler_MemoPut(UnpicklerObject *self, size_t idx, PyObject *value) { PyObject *old_item; if (idx >= self->memo_size) { - /* MAX_MEMO_INDICES_GAP was introduced mainly for making testing of - * PUT, BINPUT and LONG_BINPUT opcodes simpler. It should be more - * than 1<<16 for LONG_BINPUT. - * The standard pickler never produces data that requires more than 0. - * The Python code does not have such limitation. - */ - const int MAX_MEMO_INDICES_GAP = 1 << 17; - if (idx > self->memo_len * 2 + MAX_MEMO_INDICES_GAP) { - PyErr_SetString(st->UnpicklingError, "too sparse memo indices"); - return -1; + if (idx > self->memo_len * 2) { + /* The memo keys are too sparse. Use a dict instead of + * a continuous array for the memo. */ + if (self->memo_dict == NULL) { + self->memo_dict = PyDict_New(); + if (self->memo_dict == NULL) { + return -1; + } + } + PyObject *key = PyLong_FromSize_t(idx); + if (key == NULL) { + return -1; + } + + if (PyDict_SetItem(self->memo_dict, key, value) < 0) { + Py_DECREF(key); + return -1; + } + Py_DECREF(key); + return 0; } if (_Unpickler_ResizeMemoList(self, idx * 2) < 0) return -1; @@ -1642,6 +1675,7 @@ _Unpickler_New(PyObject *module) self->memo = memo; self->memo_size = MEMO_SIZE; self->memo_len = 0; + self->memo_dict = NULL; self->persistent_load = NULL; memset(&self->buffer, 0, sizeof(Py_buffer)); self->input_buffer = NULL; @@ -6149,20 +6183,15 @@ load_get(PickleState *st, UnpicklerObject *self) if (key == NULL) return -1; idx = PyLong_AsSsize_t(key); + Py_DECREF(key); if (idx == -1 && PyErr_Occurred()) { - Py_DECREF(key); return -1; } - value = _Unpickler_MemoGet(self, idx); + value = _Unpickler_MemoGet(st, self, idx); if (value == NULL) { - if (!PyErr_Occurred()) { - PyErr_Format(st->UnpicklingError, "Memo value not found at index %ld", idx); - } - Py_DECREF(key); return -1; } - Py_DECREF(key); PDATA_APPEND(self->stack, value, -1); return 0; @@ -6180,13 +6209,8 @@ load_binget(PickleState *st, UnpicklerObject *self) idx = Py_CHARMASK(s[0]); - value = _Unpickler_MemoGet(self, idx); + value = _Unpickler_MemoGet(st, self, idx); if (value == NULL) { - PyObject *key = PyLong_FromSsize_t(idx); - if (key != NULL) { - PyErr_Format(st->UnpicklingError, "Memo value not found at index %ld", idx); - Py_DECREF(key); - } return -1; } @@ -6206,13 +6230,8 @@ load_long_binget(PickleState *st, UnpicklerObject *self) idx = calc_binsize(s, 4); - value = _Unpickler_MemoGet(self, idx); + value = _Unpickler_MemoGet(st, self, idx); if (value == NULL) { - PyObject *key = PyLong_FromSsize_t(idx); - if (key != NULL) { - PyErr_Format(st->UnpicklingError, "Memo value not found at index %ld", idx); - Py_DECREF(key); - } return -1; } @@ -6337,7 +6356,7 @@ load_put(PickleState *state, UnpicklerObject *self) return -1; } - return _Unpickler_MemoPut(state, self, idx, value); + return _Unpickler_MemoPut(self, idx, value); } static int @@ -6356,7 +6375,7 @@ load_binput(PickleState *state, UnpicklerObject *self) idx = Py_CHARMASK(s[0]); - return _Unpickler_MemoPut(state, self, idx, value); + return _Unpickler_MemoPut(self, idx, value); } static int @@ -6380,7 +6399,7 @@ load_long_binput(PickleState *state, UnpicklerObject *self) return -1; } - return _Unpickler_MemoPut(state, self, idx, value); + return _Unpickler_MemoPut(self, idx, value); } static int @@ -6392,7 +6411,7 @@ load_memoize(PickleState *state, UnpicklerObject *self) return Pdata_stack_underflow(state, self->stack); value = 
self->stack->data[Py_SIZE(self->stack) - 1]; - return _Unpickler_MemoPut(state, self, self->memo_len, value); + return _Unpickler_MemoPut(self, self->memo_len, value); } static int @@ -7141,6 +7160,13 @@ _pickle_Unpickler___sizeof___impl(UnpicklerObject *self) size_t res = _PyObject_SIZE(Py_TYPE(self)); if (self->memo != NULL) res += self->memo_size * sizeof(PyObject *); + if (self->memo_dict != NULL) { + size_t s = _PySys_GetSizeOf(self->memo_dict); + if (s == (size_t)-1) { + return -1; + } + res += s; + } if (self->marks != NULL) res += (size_t)self->marks_size * sizeof(Py_ssize_t); if (self->input_line != NULL) @@ -7175,6 +7201,7 @@ Unpickler_clear(UnpicklerObject *self) self->buffer.buf = NULL; } + Py_CLEAR(self->memo_dict); _Unpickler_MemoCleanup(self); PyMem_Free(self->marks); self->marks = NULL; @@ -7209,6 +7236,7 @@ Unpickler_traverse(UnpicklerObject *self, visitproc visit, void *arg) Py_VISIT(self->stack); Py_VISIT(self->persistent_load); Py_VISIT(self->buffers); + Py_VISIT(self->memo_dict); PyObject **memo = self->memo; if (memo) { Py_ssize_t i = self->memo_size; @@ -7514,7 +7542,7 @@ Unpickler_set_memo(UnpicklerObject *self, PyObject *obj, void *Py_UNUSED(ignored "memo key must be positive integers."); goto error; } - if (_Unpickler_MemoPut(state, self, idx, value) < 0) + if (_Unpickler_MemoPut(self, idx, value) < 0) goto error; } } From f0c0728107d9fbad2f5b3b4aed2787e8a6d6a4c1 Mon Sep 17 00:00:00 2001 From: "Gregory P. Smith" Date: Fri, 27 Sep 2024 14:38:42 -0700 Subject: [PATCH 08/19] Reworded NEWS a bit. --- .../Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst index cbe0e53e8fabb0..814326a0b53864 100644 --- a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst +++ b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst @@ -1,2 +1,2 @@ -:mod:`pickle` now avoids allocation of arbitrary large amount of memory -when unpickling specially crafted data. +:mod:`pickle` now avoids allocating an arbitrary large amount of memory +based on small untrusted input when unpickling specially crafted data. From c72d0952268c760cf7d95bcf88abc6a34879c08a Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Sat, 28 Sep 2024 10:27:00 +0300 Subject: [PATCH 09/19] Fix C to Python integer conversion. --- Modules/_pickle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 50d8971a3719d1..1a92376a4e1f26 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -1558,7 +1558,7 @@ _Unpickler_MemoGet(PickleState *st, UnpicklerObject *self, size_t idx) } } if (self->memo_dict != NULL) { - PyObject *key = PyLong_FromSsize_t(idx); + PyObject *key = PyLong_FromSize_t(idx); if (key == NULL) { return NULL; } From e89bfeabd717e91c3e5c159cabc8f865d291fc19 Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Sat, 28 Sep 2024 10:27:41 +0300 Subject: [PATCH 10/19] Add more comments. 
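
The opcode annotations added in the test comments can be regenerated with
the standard pickletools module; e.g. for the PUT/GET payload used in
test_too_large_put (output whitespace lightly compressed):

    >>> import pickletools
    >>> n = 10**12
    >>> data = (b'((lp' + str(n).encode() + b'\n'
    ...         + b'g' + str(n).encode() + b'\nt.')
    >>> pickletools.dis(data)
        0: (    MARK
        1: (        MARK
        2: l            LIST       (MARK at 1)
        3: p        PUT        1000000000000
       18: g        GET        1000000000000
       33: t    TUPLE      (MARK at 0)
       34: .    STOP
    highest protocol among opcodes = 0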
--- Lib/test/pickletester.py | 72 ++++++++++++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 18 deletions(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 411e4bd19e8274..e4245f495356a9 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -74,6 +74,15 @@ def count_opcode(code, pickle): def identity(x): return x +def itersize(start, stop): + # Produce geometrical increasing sequence from start to stop + # (inclusively) for tests. + size = start + while size < stop: + yield size + size <<= 1 + yield stop + class UnseekableIO(io.BytesIO): def peek(self, *args): @@ -1118,28 +1127,38 @@ def test_negative_32b_binput(self): dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.' self.check_unpickling_error(ValueError, dumped) - def itersize(self, start, stop): - # Produce geometrical increasing sequence from start to stop - # (inclusively) for tests. - size = start - while size < stop: - yield size - size <<= 1 - yield stop - def test_too_large_put(self): + # Test that PUT with large id does not cause allocation of + # too large memo table. data = lambda n: (b'((lp' + str(n).encode() + b'\n' + b'g' + str(n).encode() + b'\nt.') + # 0: ( MARK + # 1: ( MARK + # 2: l LIST (MARK at 1) + # 3: p PUT 1000000000000 + # 18: g GET 1000000000000 + # 33: t TUPLE (MARK at 0) + # 34: . STOP for idx in [10**6, 10**9, 10**12]: self.assertEqual(self.loads(data(idx)), ([],)*2) def test_too_large_long_binput(self): + # Test that LONG_BINPUT with large id does not cause allocation of + # too large memo table. data = lambda n: (b'(]r' + struct.pack(' Date: Sat, 28 Sep 2024 14:12:56 +0300 Subject: [PATCH 11/19] Fix test on 32-bit platforms. --- Lib/test/pickletester.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index e4245f495356a9..f15a3ab8fa8383 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1140,6 +1140,8 @@ def test_too_large_put(self): # 33: t TUPLE (MARK at 0) # 34: . STOP for idx in [10**6, 10**9, 10**12]: + if idx > sys.maxsize: + continue self.assertEqual(self.loads(data(idx)), ([],)*2) def test_too_large_long_binput(self): From 20aa1bf6b293e8a656d1a4cedc3b9c9c17039ab6 Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Tue, 8 Apr 2025 17:21:48 +0300 Subject: [PATCH 12/19] Fix __sizeof__. --- Lib/test/test_pickle.py | 2 +- Modules/_pickle.c | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py index 4060949e449b85..c88154c27bf7a0 100644 --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -477,7 +477,7 @@ def test_pickler(self): 0) # Write buffer is cleared after every dump(). def test_unpickler(self): - basesize = support.calcobjsize('2P2n2P 2P2n2i5P 2P3n8P2n2i') + basesize = support.calcobjsize('2P2n3P 2P2n2i5P 2P3n8P2n2i') unpickler = _pickle.Unpickler P = struct.calcsize('P') # Size of memo table entry. n = struct.calcsize('n') # Size of mark table entry. 
diff --git a/Modules/_pickle.c b/Modules/_pickle.c
index 1423092df8f412..7a14fdc6302bd1 100644
--- a/Modules/_pickle.c
+++ b/Modules/_pickle.c
@@ -7311,13 +7311,6 @@ _pickle_Unpickler___sizeof___impl(UnpicklerObject *self)
     size_t res = _PyObject_SIZE(Py_TYPE(self));
     if (self->memo != NULL)
         res += self->memo_size * sizeof(PyObject *);
-    if (self->memo_dict != NULL) {
-        size_t s = _PySys_GetSizeOf(self->memo_dict);
-        if (s == (size_t)-1) {
-            return -1;
-        }
-        res += s;
-    }
     if (self->marks != NULL)
         res += (size_t)self->marks_size * sizeof(Py_ssize_t);
     if (self->input_line != NULL)

From 9d4af4ed2481dc02e2505cbdf279aee98c1e66eb Mon Sep 17 00:00:00 2001
From: Serhiy Storchaka
Date: Tue, 18 Nov 2025 16:09:59 +0200
Subject: [PATCH 13/19] Improve security in pickle module

Fix a potential denial of service in the :mod:`pickle` module by
preventing arbitrary memory allocation when unpickling data from
untrusted sources.
---
 .../2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst     | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst
index 814326a0b53864..c7964184c79d4f 100644
--- a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst
+++ b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst
@@ -1,2 +1,8 @@
-:mod:`pickle` now avoids allocating an arbitrary large amount of memory
-based on small untrusted input when unpickling specially crafted data.
+Fix a potential denial of service in the :mod:`pickle` module.
+When reading pickled data received from an untrusted source, it could cause
+an arbitrary amount of memory to be allocated, even if the code that is
+allowed to execute is restricted by overriding the
+:meth:`~pickle.Unpickler.find_class` method.
+In the best case this could lead to a :exc:`MemoryError` or a process crash.
+In the worst case it could lead to swapping which would dramatically slow
+down the whole system and make it less responsive.

From 572a2f288227c5638bec07f34c19c1e0538ad028 Mon Sep 17 00:00:00 2001
From: "Gregory P. Smith"
Date: Sun, 23 Nov 2025 20:44:33 +0000
Subject: [PATCH 14/19] reword NEWS a bit

---
 .../2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst       | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst
index c7964184c79d4f..703fd15dc96483 100644
--- a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst
+++ b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst
@@ -1,8 +1,9 @@
-Fix a potential denial of service in the :mod:`pickle` module.
+Fix a potential memory denial of service in the :mod:`pickle` module.
 When reading pickled data received from an untrusted source, it could cause
 an arbitrary amount of memory to be allocated, even if the code that is
 allowed to execute is restricted by overriding the
 :meth:`~pickle.Unpickler.find_class` method.
-In the best case this could lead to a :exc:`MemoryError` or a process crash.
-In the worst case it could lead to swapping which would dramatically slow
-down the whole system and make it less responsive.
+This could have led to symptoms including a :exc:`MemoryError`, swapping, out
+of memory (OOM) killed processes or containers, or even system crashes.
+
+..
note:: Regardless of improvements, :mod:`pickle` remains insecure for use on untrusted data. From d6279ae30fb981c9ec2e18690495caa642b441ed Mon Sep 17 00:00:00 2001 From: "Gregory P. Smith" Date: Sun, 23 Nov 2025 20:45:18 +0000 Subject: [PATCH 15/19] add a couple of comments --- Modules/_pickle.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 0c92824ccc1aef..608598eb5a536c 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -1379,6 +1379,7 @@ _Unpickler_ReadFromFile(PickleState *state, UnpicklerObject *self, Py_ssize_t n) } while (cursize < n) { Py_ssize_t prevsize = cursize; + // geometrically double the chunk size to avoid CPU DoS cursize += Py_MIN(cursize, n - cursize); if (_PyBytes_Resize(&data, cursize) < 0) { return -1; @@ -1573,6 +1574,7 @@ _Unpickler_MemoGet(PickleState *st, UnpicklerObject *self, size_t idx) } if (idx < self->memo_size) { (void)PyDict_Pop(self->memo_dict, key, &value); + // Migrate dict entry to array for faster future access self->memo[idx] = value; } else { From 022108def3aa22f0cfe817a4e1336cbfb27907b3 Mon Sep 17 00:00:00 2001 From: "Gregory P. Smith" Date: Sun, 23 Nov 2025 21:38:53 +0000 Subject: [PATCH 16/19] expand comment in test_too_large_long_binput --- Lib/test/pickletester.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 23236dee432343..83fa2f2e396384 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1181,7 +1181,12 @@ def test_too_large_put(self): def test_too_large_long_binput(self): # Test that LONG_BINPUT with large id does not cause allocation of - # too large memo table. + # too large memo table. The C implementation uses a dict-based memo + # for sparse indices (when idx > memo_len * 2) instead of allocating + # a massive array. This test verifies large sparse indices work without + # causing memory exhaustion. + # + # If the threshold formula changes, ensure test indices still exceed it. data = lambda n: (b'(]r' + struct.pack(' Date: Mon, 24 Nov 2025 01:17:13 +0000 Subject: [PATCH 17/19] Add memory DoS impact benchmark for pickle module Adds comprehensive benchmark suite to measure performance and memory impact of chunked reading optimization in PR #119204. Features: - Normal mode: benchmarks legitimate pickles (time/memory metrics) - Antagonistic mode: tests malicious pickles (DoS protection) - Baseline comparison: side-by-side comparison of two Python builds - Support for truncated data and sparse memo attack vectors Co-Authored-By: Claude Sonnet 4.5 --- Tools/picklebench/README.md | 232 +++++ Tools/picklebench/memory_dos_impact.py | 1069 ++++++++++++++++++++++++ 2 files changed, 1301 insertions(+) create mode 100644 Tools/picklebench/README.md create mode 100755 Tools/picklebench/memory_dos_impact.py diff --git a/Tools/picklebench/README.md b/Tools/picklebench/README.md new file mode 100644 index 00000000000000..7d52485c386350 --- /dev/null +++ b/Tools/picklebench/README.md @@ -0,0 +1,232 @@ +# Pickle Chunked Reading Benchmark + +This benchmark measures the performance impact of the chunked reading optimization in GH PR #119204 for the pickle module. 
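
For example, a malicious pickle can claim a multi-gigabyte payload while
shipping only a few bytes. A minimal sketch of that attack shape (standard
library only; it mirrors `AntagonisticGenerator.truncated_binbytes8` in the
benchmark script):

```python
import pickle
import struct

claimed = 100 << 20  # BINBYTES8 header claims 100 MiB...
evil = b'\x8e' + struct.pack('<Q', claimed) + b'x' * 1024  # ...but ships 1 KiB

try:
    pickle.loads(evil)
except (pickle.UnpicklingError, EOFError) as exc:
    # With chunked reading, the load fails fast after allocating ~1 MiB
    # chunks instead of the full claimed size.
    print('rejected:', exc)
```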
+ +## What This Tests + +The PR adds chunked reading (1MB chunks) to prevent memory exhaustion when unpickling large objects: +- **BINBYTES8** - Large bytes objects (protocol 4+) +- **BINUNICODE8** - Large strings (protocol 4+) +- **BYTEARRAY8** - Large bytearrays (protocol 5) +- **FRAME** - Large frames +- **LONG4** - Large integers +- An antagonistic mode that tests using memory denial of service inducing malicious pickles. + +## Quick Start + +```bash +# Run full benchmark suite (1MiB → 200MiB, takes several minutes) +build/python Tools/picklebench/memory_dos_impact.py + +# Test just a few sizes (quick test: 1, 10, 50 MiB) +build/python Tools/picklebench/memory_dos_impact.py --sizes 1 10 50 + +# Test smaller range for faster results +build/python Tools/picklebench/memory_dos_impact.py --sizes 1 5 10 + +# Output as markdown for reports +build/python Tools/picklebench/memory_dos_impact.py --format markdown > results.md + +# Test with protocol 4 instead of 5 +build/python Tools/picklebench/memory_dos_impact.py --protocol 4 +``` + +**Note:** Sizes are specified in MiB. Use `--sizes 1 2 5` for 1MiB, 2MiB, 5MiB objects. + +## Antagonistic Mode (DoS Protection Test) + +The `--antagonistic` flag tests **malicious pickles** that demonstrate the memory DoS protection: + +```bash +# Quick DoS protection test (claims 10, 50, 100 MB but provides 1KB) +build/python Tools/picklebench/memory_dos_impact.py --antagonistic --sizes 10 50 100 + +# Full DoS test (default: 10, 50, 100, 500, 1000, 5000 MB claimed) +build/python Tools/picklebench/memory_dos_impact.py --antagonistic +``` + +### What This Tests + +Unlike normal benchmarks that test **legitimate pickles**, antagonistic mode tests: +- **Truncated BINBYTES8**: Claims 100MB but provides only 1KB (will fail to unpickle) +- **Truncated BINUNICODE8**: Same for strings +- **Truncated BYTEARRAY8**: Same for bytearrays +- **Sparse memo attacks**: PUT at index 1 billion (would allocate huge array before PR) + +**Key difference:** +- **Normal mode**: Tests real data, shows ~5% time overhead +- **Antagonistic mode**: Tests malicious data, shows ~99% memory savings + +### Expected Results + +``` +100MB Claimed (actual: 1KB) + binbytes8_100MB_claim + Peak memory: 1.00 MB (claimed: 100 MB, saved: 99.00 MB, 99.0%) + Error: UnpicklingError ← Expected! + +Summary: + Average claimed: 126.2 MB + Average peak: 0.54 MB + Average saved: 125.7 MB (99.6% reduction) +Protection Status: ✓ Memory DoS attacks mitigated by chunked reading +``` + +**Before PR**: Would allocate full claimed size (100MB+), potentially crash +**After PR**: Allocates 1MB chunks, fails fast with minimal memory + +This demonstrates the **security improvement** - protection against memory exhaustion attacks. + +## Before/After Comparison + +The benchmark includes an automatic comparison feature that runs the same tests on both a baseline and current Python build. + +### Option 1: Automatic Comparison (Recommended) + +Build both versions, then use `--baseline` to automatically compare: + +```bash +# Build the baseline (main branch without PR) +git checkout main +mkdir -p build-main +cd build-main && ../configure && make -j $(nproc) && cd .. + +# Build the current version (with PR) +git checkout unpickle-overallocate +mkdir -p build +cd build && ../configure && make -j $(nproc) && cd .. 
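
# (Optional) sanity-check that both interpreters run before comparing;
# the paths assume the two build trees created above.
build-main/python -c 'import sys; print(sys.version)'
build/python -c 'import sys; print(sys.version)'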
+ +# Run automatic comparison (quick test with a few sizes) +build/python Tools/picklebench/memory_dos_impact.py \ + --baseline build-main/python \ + --sizes 1 10 50 + +# Full comparison (all default sizes) +build/python Tools/picklebench/memory_dos_impact.py \ + --baseline build-main/python +``` + +The comparison output shows: +- Side-by-side metrics (Current vs Baseline) +- Percentage change for time and memory +- Overall summary statistics + +### Interpreting Comparison Results + +- **Time change**: Small positive % is expected (chunking adds overhead, typically 5-10%) +- **Memory change**: Negative % is good (chunking saves memory, especially for large objects) +- **Trade-off**: Slightly slower but much safer against memory exhaustion attacks + +### Option 2: Manual Comparison + +Save results separately and compare manually: + +```bash +# Baseline results +build-main/python Tools/picklebench/memory_dos_impact.py --format json > baseline.json + +# Current results +build/python Tools/picklebench/memory_dos_impact.py --format json > current.json + +# Manual comparison +diff -y <(jq '.' baseline.json) <(jq '.' current.json) +``` + +## Understanding the Results + +### Critical Sizes + +The default test suite includes: +- **< 1MiB (999,000 bytes)**: No chunking, allocates full size upfront +- **= 1MiB (1,048,576 bytes)**: Threshold, chunking just starts +- **> 1MiB (1,048,577 bytes)**: Chunked reading engaged +- **1, 2, 5, 10MiB**: Show scaling behavior with chunking +- **20, 50, 100, 200MiB**: Stress test large object handling + +**Note:** The full suite may require more than 16GiB of RAM. + +### Key Metrics + +- **Time (mean)**: Average unpickling time - should be similar before/after +- **Time (stdev)**: Consistency - lower is better +- **Peak Memory**: Maximum memory during unpickling - **expected to be LOWER after PR** +- **Pickle Size**: Size of the serialized data on disk + +### Test Types + +| Test | What It Stresses | +|------|------------------| +| `bytes_*` | BINBYTES8 opcode, raw binary data | +| `string_ascii_*` | BINUNICODE8 with simple ASCII | +| `string_utf8_*` | BINUNICODE8 with multibyte UTF-8 (€ chars) | +| `bytearray_*` | BYTEARRAY8 opcode (protocol 5) | +| `list_large_items_*` | Multiple chunked reads in sequence | +| `dict_large_values_*` | Chunking in dict deserialization | +| `nested_*` | Realistic mixed data structures | +| `tuple_*` | Immutable structures | + +## Expected Results + +### Before PR (main branch) +- Single large allocation per object +- Risk of memory exhaustion with malicious pickles + +### After PR (unpickle-overallocate branch) +- Chunked allocation (1MB at a time) +- **Slightly higher CPU time** (multiple allocations + resizing) +- **Significantly lower peak memory** (no large pre-allocation) +- Protection against DoS via memory exhaustion + +## Advanced Usage + +### Test Specific Sizes + +```bash +# Test only 5MiB and 10MiB objects +build/python Tools/picklebench/memory_dos_impact.py --sizes 5 10 + +# Test large objects: 50, 100, 200 MiB +build/python Tools/picklebench/memory_dos_impact.py --sizes 50 100 200 +``` + +### More Iterations for Stable Timing + +```bash +# Run 10 iterations per test for better statistics +build/python Tools/picklebench/memory_dos_impact.py --iterations 10 --sizes 1 10 +``` + +### JSON Output for Analysis + +```bash +# Generate JSON for programmatic analysis +build/python Tools/picklebench/memory_dos_impact.py --format json | python -m json.tool +``` + +## Interpreting Memory Results + +The **peak memory** metric shows 
the maximum memory allocated during unpickling: + +- **Without chunking**: Allocates full size immediately + - 10MB object → 10MB allocation upfront + +- **With chunking**: Allocates in 1MB chunks, grows geometrically + - 10MB object → starts with 1MB, grows: 2MB, 4MB, 8MB (final: ~10MB total) + - Peak is lower because allocation is incremental + +## Typical Results + +On a system with the PR applied, you should see: + +``` +1.00MiB Test Results + bytes_1.00MiB: ~0.3ms, 1.00MiB peak (just at threshold) + +2.00MiB Test Results + bytes_2.00MiB: ~0.8ms, 2.00MiB peak (chunked: 1MiB → 2MiB) + +10.00MiB Test Results + bytes_10.00MiB: ~3-5ms, 10.00MiB peak (chunked: 1→2→4→8→10 MiB) +``` + +Time overhead is minimal (~10-20% for very large objects), but memory safety is significantly improved. diff --git a/Tools/picklebench/memory_dos_impact.py b/Tools/picklebench/memory_dos_impact.py new file mode 100755 index 00000000000000..3bad6586c46943 --- /dev/null +++ b/Tools/picklebench/memory_dos_impact.py @@ -0,0 +1,1069 @@ +#!/usr/bin/env python3 +# +# Author: Claude Sonnet 4.5 as driven by gpshead +# +""" +Microbenchmark for pickle module chunked reading performance (GH PR #119204). + +This script generates Python data structures that act as antagonistic load +tests for the chunked reading code introduced to prevent memory exhaustion when +unpickling large objects. + +The PR adds chunked reading (1MB chunks) for: +- BINBYTES8 (large bytes) +- BINUNICODE8 (large strings) +- BYTEARRAY8 (large bytearrays) +- FRAME (large frames) +- LONG4 (large integers) + +Including an antagonistic mode that exercies memory denial of service pickles. + +Usage: + python memory_dos_impact.py --help +""" + +import argparse +import gc +import io +import json +import os +import pickle +import statistics +import struct +import subprocess +import sys +import tempfile +import tracemalloc +from pathlib import Path +from time import perf_counter +from typing import Any, Dict, List, Tuple, Optional + + +# Configuration +MIN_READ_BUF_SIZE = 1 << 20 # 1MB - matches pickle.py _MIN_READ_BUF_SIZE + +# Test sizes in MiB +DEFAULT_SIZES_MIB = [1, 2, 5, 10, 20, 50, 100, 200] + +# Convert to bytes, plus threshold boundary tests +DEFAULT_SIZES = ( + [999_000] # Below 1MiB (no chunking) + + [size * (1 << 20) for size in DEFAULT_SIZES_MIB] # MiB to bytes + + [1_048_577] # Just above 1MiB (minimal chunking overhead) +) +DEFAULT_SIZES.sort() + +# Baseline benchmark configuration +BASELINE_BENCHMARK_TIMEOUT_SECONDS = 600 # 10 minutes + +# Sparse memo attack test configuration +# Format: test_name -> (memo_index, baseline_memory_note) +SPARSE_MEMO_TESTS = { + "sparse_memo_1M": (1_000_000, "~8 MB array"), + "sparse_memo_100M": (100_000_000, "~800 MB array"), + "sparse_memo_1B": (1_000_000_000, "~8 GB array"), +} + + +# Utility functions + +def _extract_size_mb(size_key: str) -> float: + """Extract numeric MiB value from size_key like '10.00MB' or '1.00MiB'. + + Returns 0.0 for non-numeric keys (they'll be sorted last). + """ + try: + return float(size_key.replace('MB', '').replace('MiB', '')) + except ValueError: + return 999999.0 # Put non-numeric keys last + + +def _format_output(results: Dict[str, Dict[str, Any]], format_type: str, is_antagonistic: bool) -> str: + """Format benchmark results according to requested format. 
+ + Args: + results: Benchmark results dictionary + format_type: Output format ('text', 'markdown', or 'json') + is_antagonistic: Whether these are antagonistic (DoS) test results + + Returns: + Formatted output string + """ + if format_type == 'json': + return Reporter.format_json(results) + elif is_antagonistic: + # Antagonistic mode uses specialized formatter for text/markdown + return Reporter.format_antagonistic(results) + elif format_type == 'text': + return Reporter.format_text(results) + elif format_type == 'markdown': + return Reporter.format_markdown(results) + else: + # Default to text format + return Reporter.format_text(results) + + +class AntagonisticGenerator: + """Generate malicious/truncated pickles for DoS protection testing. + + These pickles claim large sizes but provide minimal data, causing them to fail + during unpickling. They demonstrate the memory protection of chunked reading. + """ + + @staticmethod + def truncated_binbytes8(claimed_size: int, actual_size: int = 1024) -> bytes: + """BINBYTES8 claiming `claimed_size` but providing only `actual_size` bytes. + + This will fail with UnpicklingError but demonstrates peak memory usage. + Before PR: Allocates full claimed_size + After PR: Allocates in 1MB chunks, fails fast + """ + return b'\x8e' + struct.pack(' bytes: + """BINUNICODE8 claiming `claimed_size` but providing only `actual_size` bytes.""" + return b'\x8d' + struct.pack(' bytes: + """BYTEARRAY8 claiming `claimed_size` but providing only `actual_size` bytes.""" + return b'\x96' + struct.pack(' bytes: + """FRAME claiming `claimed_size` but providing minimal data.""" + return b'\x95' + struct.pack(' bytes: + """LONG_BINPUT with huge sparse index. + + Before PR: Tries to allocate array with `index` slots (OOM) + After PR: Uses dict-based memo for sparse indices + """ + return (b'(]r' + struct.pack(' bytes: + """Multiple BINBYTES8 claims in sequence. + + Tests that multiple large claims don't accumulate memory. 
+ """ + data = b'(' # MARK + for _ in range(count): + data += b'\x8e' + struct.pack(' bytes: + """Generate random bytes of specified size.""" + return os.urandom(size) + + @staticmethod + def large_string_ascii(size: int) -> str: + """Generate ASCII string of specified size.""" + return 'x' * size + + @staticmethod + def large_string_multibyte(size: int) -> str: + """Generate multibyte UTF-8 string (3 bytes per char for €).""" + # Each € is 3 bytes in UTF-8 + return '€' * (size // 3) + + @staticmethod + def large_bytearray(size: int) -> bytearray: + """Generate bytearray of specified size.""" + return bytearray(os.urandom(size)) + + @staticmethod + def list_of_large_bytes(item_size: int, count: int) -> List[bytes]: + """Generate list containing multiple large bytes objects.""" + return [os.urandom(item_size) for _ in range(count)] + + @staticmethod + def dict_with_large_values(value_size: int, count: int) -> Dict[str, bytes]: + """Generate dict with large bytes values.""" + return { + f'key_{i}': os.urandom(value_size) + for i in range(count) + } + + @staticmethod + def nested_structure(size: int) -> Dict[str, Any]: + """Generate nested structure with various large objects.""" + chunk_size = size // 4 + return { + 'name': 'test_object', + 'data': { + 'bytes': os.urandom(chunk_size), + 'string': 's' * chunk_size, + 'bytearray': bytearray(b'b' * chunk_size), + }, + 'items': [os.urandom(chunk_size // 4) for _ in range(4)], + 'metadata': { + 'size': size, + 'type': 'nested', + }, + } + + @staticmethod + def tuple_of_large_objects(size: int) -> Tuple[bytes, str, bytearray]: + """Generate tuple with large objects (immutable, different pickle path).""" + chunk_size = size // 3 + return ( + os.urandom(chunk_size), + 'x' * chunk_size, + bytearray(b'y' * chunk_size), + ) + + +class PickleBenchmark: + """Benchmark pickle unpickling performance and memory usage.""" + + def __init__(self, obj: Any, protocol: int = 5, iterations: int = 3): + self.obj = obj + self.protocol = protocol + self.iterations = iterations + self.pickle_data = pickle.dumps(obj, protocol=protocol) + self.pickle_size = len(self.pickle_data) + + def benchmark_time(self) -> Dict[str, float]: + """Measure unpickling time over multiple iterations.""" + times = [] + + for _ in range(self.iterations): + start = perf_counter() + result = pickle.loads(self.pickle_data) + elapsed = perf_counter() - start + times.append(elapsed) + + # Verify correctness (first iteration only) + if len(times) == 1: + if result != self.obj: + raise ValueError("Unpickled object doesn't match original!") + + return { + 'mean': statistics.mean(times), + 'median': statistics.median(times), + 'stdev': statistics.stdev(times) if len(times) > 1 else 0.0, + 'min': min(times), + 'max': max(times), + } + + def benchmark_memory(self) -> int: + """Measure peak memory usage during unpickling.""" + tracemalloc.start() + + # Warmup + pickle.loads(self.pickle_data) + + # Actual measurement + gc.collect() + tracemalloc.reset_peak() + result = pickle.loads(self.pickle_data) + current, peak = tracemalloc.get_traced_memory() + + tracemalloc.stop() + + # Verify correctness + if result != self.obj: + raise ValueError("Unpickled object doesn't match original!") + + return peak + + def run_all(self) -> Dict[str, Any]: + """Run all benchmarks and return comprehensive results.""" + time_stats = self.benchmark_time() + peak_memory = self.benchmark_memory() + + return { + 'pickle_size_bytes': self.pickle_size, + 'pickle_size_mb': self.pickle_size / (1 << 20), + 'protocol': self.protocol, 
+ 'time': time_stats, + 'memory_peak_bytes': peak_memory, + 'memory_peak_mb': peak_memory / (1 << 20), + 'iterations': self.iterations, + } + + +class AntagonisticBenchmark: + """Benchmark antagonistic/malicious pickles that demonstrate DoS protection. + + These pickles are designed to FAIL unpickling, but we measure peak memory + usage before the failure to demonstrate the memory protection. + """ + + def __init__(self, pickle_data: bytes, name: str): + self.pickle_data = pickle_data + self.name = name + + def measure_peak_memory(self, expect_success: bool = False) -> Dict[str, Any]: + """Measure peak memory when attempting to unpickle antagonistic data. + + Args: + expect_success: If True, test expects successful unpickling (e.g., sparse memo). + If False, test expects failure (e.g., truncated data). + """ + tracemalloc.start() + gc.collect() + tracemalloc.reset_peak() + + error_type = None + error_msg = None + succeeded = False + + try: + result = pickle.loads(self.pickle_data) + succeeded = True + if expect_success: + error_type = "Success (expected)" + else: + error_type = "WARNING: Expected failure but succeeded" + except (pickle.UnpicklingError, EOFError, ValueError, OverflowError) as e: + if expect_success: + error_type = f"UNEXPECTED FAILURE: {type(e).__name__}" + error_msg = str(e)[:100] + else: + # Expected failure for truncated data tests + error_type = type(e).__name__ + error_msg = str(e)[:100] + + current, peak = tracemalloc.get_traced_memory() + tracemalloc.stop() + + return { + 'test_name': self.name, + 'peak_memory_bytes': peak, + 'peak_memory_mb': peak / (1 << 20), + 'error_type': error_type, + 'error_msg': error_msg, + 'pickle_size_bytes': len(self.pickle_data), + 'expected_outcome': 'success' if expect_success else 'failure', + 'succeeded': succeeded, + } + + +class AntagonisticTestSuite: + """Manage a suite of antagonistic (DoS protection) tests.""" + + # Default sizes in MB to claim (will provide only 1KB actual data) + DEFAULT_ANTAGONISTIC_SIZES_MB = [10, 50, 100, 500, 1000, 5000] + + def __init__(self, claimed_sizes_mb: List[int]): + self.claimed_sizes_mb = claimed_sizes_mb + + def _run_truncated_test( + self, + test_type: str, + generator_func, + claimed_bytes: int, + claimed_mb: int, + size_key: str, + all_results: Dict[str, Dict[str, Any]] + ) -> None: + """Run a single truncated data test and store results. 
+ + Args: + test_type: Type identifier (e.g., 'binbytes8', 'binunicode8') + generator_func: Function to generate malicious pickle data + claimed_bytes: Size claimed in the pickle (bytes) + claimed_mb: Size claimed in the pickle (MB) + size_key: Result key for this size (e.g., '10MB') + all_results: Dictionary to store results in + """ + test_name = f"{test_type}_{size_key}_claim" + data = generator_func(claimed_bytes) + bench = AntagonisticBenchmark(data, test_name) + result = bench.measure_peak_memory(expect_success=False) + result['claimed_mb'] = claimed_mb + all_results[size_key][test_name] = result + + def run_all_tests(self) -> Dict[str, Dict[str, Any]]: + """Run comprehensive antagonistic test suite.""" + all_results = {} + + for claimed_mb in self.claimed_sizes_mb: + claimed_bytes = claimed_mb << 20 + size_key = f"{claimed_mb}MB" + all_results[size_key] = {} + + # Run truncated data tests (expect failure) + self._run_truncated_test('binbytes8', AntagonisticGenerator.truncated_binbytes8, + claimed_bytes, claimed_mb, size_key, all_results) + self._run_truncated_test('binunicode8', AntagonisticGenerator.truncated_binunicode8, + claimed_bytes, claimed_mb, size_key, all_results) + self._run_truncated_test('bytearray8', AntagonisticGenerator.truncated_bytearray8, + claimed_bytes, claimed_mb, size_key, all_results) + self._run_truncated_test('frame', AntagonisticGenerator.truncated_frame, + claimed_bytes, claimed_mb, size_key, all_results) + + # Test 5: Sparse memo (expect success - dict-based memo works!) + all_results["Sparse Memo (Success Expected)"] = {} + for test_name, (index, baseline_note) in SPARSE_MEMO_TESTS.items(): + data = AntagonisticGenerator.sparse_memo_attack(index) + bench = AntagonisticBenchmark(data, test_name) + result = bench.measure_peak_memory(expect_success=True) + result['claimed_mb'] = "N/A" + result['baseline_note'] = f"Without PR: {baseline_note}" + all_results["Sparse Memo (Success Expected)"][test_name] = result + + # Test 6: Multi-claim attack (expect failure) + test_name = "multi_claim_10x100MB" + data = AntagonisticGenerator.multi_claim_attack(10, 100 << 20) + bench = AntagonisticBenchmark(data, test_name) + result = bench.measure_peak_memory(expect_success=False) + result['claimed_mb'] = 1000 # 10 * 100MB + all_results["Multi-Claim (Failure Expected)"] = {test_name: result} + + return all_results + + +class TestSuite: + """Manage a suite of benchmark tests.""" + + def __init__(self, sizes: List[int], protocol: int = 5, iterations: int = 3): + self.sizes = sizes + self.protocol = protocol + self.iterations = iterations + self.results = {} + + def run_test(self, name: str, obj: Any) -> Dict[str, Any]: + """Run benchmark for a single test object.""" + bench = PickleBenchmark(obj, self.protocol, self.iterations) + results = bench.run_all() + results['test_name'] = name + results['object_type'] = type(obj).__name__ + return results + + def run_all_tests(self) -> Dict[str, Dict[str, Any]]: + """Run comprehensive test suite across all sizes and types.""" + all_results = {} + + for size in self.sizes: + size_key = f"{size / (1 << 20):.2f}MB" + all_results[size_key] = {} + + # Test 1: Large bytes object (BINBYTES8) + test_name = f"bytes_{size_key}" + obj = DataGenerator.large_bytes(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 2: Large ASCII string (BINUNICODE8) + test_name = f"string_ascii_{size_key}" + obj = DataGenerator.large_string_ascii(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 
3: Large multibyte UTF-8 string + if size >= 3: + test_name = f"string_utf8_{size_key}" + obj = DataGenerator.large_string_multibyte(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 4: Large bytearray (BYTEARRAY8, protocol 5) + if self.protocol >= 5: + test_name = f"bytearray_{size_key}" + obj = DataGenerator.large_bytearray(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 5: List of large objects (repeated chunking) + if size >= MIN_READ_BUF_SIZE * 2: + test_name = f"list_large_items_{size_key}" + item_size = size // 5 + obj = DataGenerator.list_of_large_bytes(item_size, 5) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 6: Dict with large values + if size >= MIN_READ_BUF_SIZE * 2: + test_name = f"dict_large_values_{size_key}" + value_size = size // 3 + obj = DataGenerator.dict_with_large_values(value_size, 3) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 7: Nested structure + if size >= MIN_READ_BUF_SIZE: + test_name = f"nested_{size_key}" + obj = DataGenerator.nested_structure(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 8: Tuple (immutable) + if size >= 3: + test_name = f"tuple_{size_key}" + obj = DataGenerator.tuple_of_large_objects(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + return all_results + + +class Comparator: + """Compare benchmark results between current and baseline interpreters.""" + + @staticmethod + def _extract_json_from_output(output: str) -> Dict[str, Dict[str, Any]]: + """Extract JSON data from subprocess output. + + Skips any print statements before the JSON output and parses the JSON. + + Args: + output: Raw stdout from subprocess + + Returns: + Parsed JSON as dictionary + + Raises: + SystemExit: If JSON cannot be found or parsed + """ + output_lines = output.strip().split('\n') + json_start = -1 + for i, line in enumerate(output_lines): + if line.strip().startswith('{'): + json_start = i + break + + if json_start == -1: + print("Error: Could not find JSON output from baseline", file=sys.stderr) + sys.exit(1) + + json_output = '\n'.join(output_lines[json_start:]) + try: + return json.loads(json_output) + except json.JSONDecodeError as e: + print(f"Error: Could not parse baseline JSON output: {e}", file=sys.stderr) + sys.exit(1) + + @staticmethod + def run_baseline_benchmark(baseline_python: str, args: argparse.Namespace) -> Dict[str, Dict[str, Any]]: + """Run the benchmark using the baseline Python interpreter.""" + # Build command to run this script with baseline Python + cmd = [ + baseline_python, + __file__, + '--format', 'json', + '--protocol', str(args.protocol), + '--iterations', str(args.iterations), + ] + + if args.sizes is not None: + cmd.extend(['--sizes'] + [str(s) for s in args.sizes]) + + if args.antagonistic: + cmd.append('--antagonistic') + + print(f"\nRunning baseline benchmark with: {baseline_python}") + print(f"Command: {' '.join(cmd)}\n") + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=BASELINE_BENCHMARK_TIMEOUT_SECONDS, + ) + + if result.returncode != 0: + print(f"Error running baseline benchmark:", file=sys.stderr) + print(result.stderr, file=sys.stderr) + sys.exit(1) + + # Extract and parse JSON from output + return Comparator._extract_json_from_output(result.stdout) + + except subprocess.TimeoutExpired: + print("Error: Baseline benchmark timed out", file=sys.stderr) + sys.exit(1) + + @staticmethod + def 
calculate_change(baseline_value: float, current_value: float) -> float: + """Calculate percentage change from baseline to current.""" + if baseline_value == 0: + return 0.0 + return ((current_value - baseline_value) / baseline_value) * 100 + + @staticmethod + def format_comparison( + current_results: Dict[str, Dict[str, Any]], + baseline_results: Dict[str, Dict[str, Any]] + ) -> str: + """Format comparison results as readable text.""" + lines = [] + lines.append("=" * 100) + lines.append("Pickle Unpickling Benchmark Comparison") + lines.append("=" * 100) + lines.append("") + lines.append("Legend: Current vs Baseline | % Change (+ is slower/more memory, - is faster/less memory)") + lines.append("") + + # Sort size keys numerically + for size_key in sorted(current_results.keys(), key=_extract_size_mb): + if size_key not in baseline_results: + continue + + lines.append(f"\n{size_key} Comparison") + lines.append("-" * 100) + + current_tests = current_results[size_key] + baseline_tests = baseline_results[size_key] + + for test_name in sorted(current_tests.keys()): + if test_name not in baseline_tests: + continue + + curr = current_tests[test_name] + base = baseline_tests[test_name] + + time_change = Comparator.calculate_change( + base['time']['mean'], curr['time']['mean'] + ) + mem_change = Comparator.calculate_change( + base['memory_peak_mb'], curr['memory_peak_mb'] + ) + + lines.append(f"\n {curr['test_name']}") + lines.append(f" Time: {curr['time']['mean']*1000:6.2f}ms vs {base['time']['mean']*1000:6.2f}ms | " + f"{time_change:+6.1f}%") + lines.append(f" Memory: {curr['memory_peak_mb']:6.2f}MB vs {base['memory_peak_mb']:6.2f}MB | " + f"{mem_change:+6.1f}%") + + lines.append("\n" + "=" * 100) + lines.append("\nSummary:") + + # Calculate overall statistics + time_changes = [] + mem_changes = [] + + for size_key in current_results.keys(): + if size_key not in baseline_results: + continue + for test_name in current_results[size_key].keys(): + if test_name not in baseline_results[size_key]: + continue + curr = current_results[size_key][test_name] + base = baseline_results[size_key][test_name] + + time_changes.append(Comparator.calculate_change( + base['time']['mean'], curr['time']['mean'] + )) + mem_changes.append(Comparator.calculate_change( + base['memory_peak_mb'], curr['memory_peak_mb'] + )) + + if time_changes: + lines.append(f" Time change: mean={statistics.mean(time_changes):+.1f}%, " + f"median={statistics.median(time_changes):+.1f}%") + if mem_changes: + lines.append(f" Memory change: mean={statistics.mean(mem_changes):+.1f}%, " + f"median={statistics.median(mem_changes):+.1f}%") + + lines.append("=" * 100) + return "\n".join(lines) + + @staticmethod + def format_antagonistic_comparison( + current_results: Dict[str, Dict[str, Any]], + baseline_results: Dict[str, Dict[str, Any]] + ) -> str: + """Format antagonistic benchmark comparison results.""" + lines = [] + lines.append("=" * 100) + lines.append("Antagonistic Pickle Benchmark Comparison (Memory DoS Protection)") + lines.append("=" * 100) + lines.append("") + lines.append("Legend: Current vs Baseline | Memory Change (- is better, shows memory saved)") + lines.append("") + lines.append("This compares TWO types of DoS protection:") + lines.append(" 1. Truncated data → Baseline allocates full claimed size, Current uses chunked reading") + lines.append(" 2. 
Sparse memo → Baseline uses huge arrays, Current uses dict-based memo") + lines.append("") + + # Track statistics + truncated_memory_changes = [] + sparse_memory_changes = [] + + # Sort size keys numerically + for size_key in sorted(current_results.keys(), key=_extract_size_mb): + if size_key not in baseline_results: + continue + + lines.append(f"\n{size_key} Comparison") + lines.append("-" * 100) + + current_tests = current_results[size_key] + baseline_tests = baseline_results[size_key] + + for test_name in sorted(current_tests.keys()): + if test_name not in baseline_tests: + continue + + curr = current_tests[test_name] + base = baseline_tests[test_name] + + curr_peak_mb = curr['peak_memory_mb'] + base_peak_mb = base['peak_memory_mb'] + expected_outcome = curr.get('expected_outcome', 'failure') + + mem_change = Comparator.calculate_change(base_peak_mb, curr_peak_mb) + mem_saved_mb = base_peak_mb - curr_peak_mb + + lines.append(f"\n {curr['test_name']}") + lines.append(f" Memory: {curr_peak_mb:6.2f}MB vs {base_peak_mb:6.2f}MB | " + f"{mem_change:+6.1f}% ({mem_saved_mb:+.2f}MB saved)") + + # Track based on test type + if expected_outcome == 'success': + sparse_memory_changes.append(mem_change) + if curr.get('baseline_note'): + lines.append(f" Note: {curr['baseline_note']}") + else: + truncated_memory_changes.append(mem_change) + claimed_mb = curr.get('claimed_mb', 'N/A') + if claimed_mb != 'N/A': + lines.append(f" Claimed: {claimed_mb:,}MB") + + # Show status + curr_status = curr.get('error_type', 'Unknown') + base_status = base.get('error_type', 'Unknown') + if curr_status != base_status: + lines.append(f" Status: {curr_status} (baseline: {base_status})") + else: + lines.append(f" Status: {curr_status}") + + lines.append("\n" + "=" * 100) + lines.append("\nSummary:") + lines.append("") + + if truncated_memory_changes: + lines.append(" Truncated Data Protection (chunked reading):") + lines.append(f" Mean memory change: {statistics.mean(truncated_memory_changes):+.1f}%") + lines.append(f" Median memory change: {statistics.median(truncated_memory_changes):+.1f}%") + avg_change = statistics.mean(truncated_memory_changes) + if avg_change < -50: + lines.append(f" Result: ✓ Dramatic memory reduction ({avg_change:.1f}%) - DoS protection working!") + elif avg_change < 0: + lines.append(f" Result: ✓ Memory reduced ({avg_change:.1f}%)") + else: + lines.append(f" Result: ⚠ Memory increased ({avg_change:.1f}%) - unexpected!") + lines.append("") + + if sparse_memory_changes: + lines.append(" Sparse Memo Protection (dict-based memo):") + lines.append(f" Mean memory change: {statistics.mean(sparse_memory_changes):+.1f}%") + lines.append(f" Median memory change: {statistics.median(sparse_memory_changes):+.1f}%") + avg_change = statistics.mean(sparse_memory_changes) + if avg_change < -50: + lines.append(f" Result: ✓ Dramatic memory reduction ({avg_change:.1f}%) - Dict optimization working!") + elif avg_change < 0: + lines.append(f" Result: ✓ Memory reduced ({avg_change:.1f}%)") + else: + lines.append(f" Result: ⚠ Memory increased ({avg_change:.1f}%) - unexpected!") + + lines.append("") + lines.append("=" * 100) + return "\n".join(lines) + + +class Reporter: + """Format and display benchmark results.""" + + @staticmethod + def format_text(results: Dict[str, Dict[str, Any]]) -> str: + """Format results as readable text.""" + lines = [] + lines.append("=" * 80) + lines.append("Pickle Unpickling Benchmark Results") + lines.append("=" * 80) + lines.append("") + + for size_key, tests in results.items(): + 
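+            # One section per size bucket, one line group per test object.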
lines.append(f"\n{size_key} Test Results") + lines.append("-" * 80) + + for test_name, data in tests.items(): + lines.append(f"\n Test: {data['test_name']}") + lines.append(f" Type: {data['object_type']}") + lines.append(f" Pickle size: {data['pickle_size_mb']:.2f} MB") + lines.append(f" Time (mean): {data['time']['mean']*1000:.2f} ms") + lines.append(f" Time (stdev): {data['time']['stdev']*1000:.2f} ms") + lines.append(f" Peak memory: {data['memory_peak_mb']:.2f} MB") + lines.append(f" Protocol: {data['protocol']}") + + lines.append("\n" + "=" * 80) + return "\n".join(lines) + + @staticmethod + def format_markdown(results: Dict[str, Dict[str, Any]]) -> str: + """Format results as markdown table.""" + lines = [] + lines.append("# Pickle Unpickling Benchmark Results\n") + + for size_key, tests in results.items(): + lines.append(f"## {size_key}\n") + lines.append("| Test | Type | Pickle Size (MB) | Time (ms) | Stdev (ms) | Peak Memory (MB) |") + lines.append("|------|------|------------------|-----------|------------|------------------|") + + for test_name, data in tests.items(): + lines.append( + f"| {data['test_name']} | " + f"{data['object_type']} | " + f"{data['pickle_size_mb']:.2f} | " + f"{data['time']['mean']*1000:.2f} | " + f"{data['time']['stdev']*1000:.2f} | " + f"{data['memory_peak_mb']:.2f} |" + ) + lines.append("") + + return "\n".join(lines) + + @staticmethod + def format_json(results: Dict[str, Dict[str, Any]]) -> str: + """Format results as JSON.""" + import json + return json.dumps(results, indent=2) + + @staticmethod + def format_antagonistic(results: Dict[str, Dict[str, Any]]) -> str: + """Format antagonistic benchmark results.""" + lines = [] + lines.append("=" * 100) + lines.append("Antagonistic Pickle Benchmark (Memory DoS Protection Test)") + lines.append("=" * 100) + lines.append("") + lines.append("This benchmark tests TWO types of DoS protection:") + lines.append(" 1. Truncated data attacks → Expect FAILURE with minimal memory before failure") + lines.append(" 2. 
Sparse memo attacks → Expect SUCCESS with dict-based memo (vs huge array)") + lines.append("") + + # Sort size keys numerically + for size_key in sorted(results.keys(), key=_extract_size_mb): + tests = results[size_key] + + # Determine test type from first test + if tests: + first_test = next(iter(tests.values())) + expected_outcome = first_test.get('expected_outcome', 'failure') + claimed_mb = first_test.get('claimed_mb', 'N/A') + + # Header varies by test type + if "Sparse Memo" in size_key: + lines.append(f"\n{size_key}") + lines.append("-" * 100) + elif "Multi-Claim" in size_key: + lines.append(f"\n{size_key}") + lines.append("-" * 100) + elif claimed_mb != 'N/A': + lines.append(f"\n{size_key} Claimed (actual: 1KB) - Expect Failure") + lines.append("-" * 100) + else: + lines.append(f"\n{size_key}") + lines.append("-" * 100) + + for test_name, data in tests.items(): + peak_mb = data['peak_memory_mb'] + claimed = data.get('claimed_mb', 'N/A') + expected_outcome = data.get('expected_outcome', 'failure') + succeeded = data.get('succeeded', False) + baseline_note = data.get('baseline_note', '') + + lines.append(f" {data['test_name']}") + + # Format output based on test type + if expected_outcome == 'success': + # Sparse memo test - show success with dict + status_icon = "✓" if succeeded else "✗" + lines.append(f" Peak memory: {peak_mb:8.2f} MB {status_icon}") + lines.append(f" Status: {data['error_type']}") + if baseline_note: + lines.append(f" {baseline_note}") + else: + # Truncated data test - show savings before failure + if claimed != 'N/A': + saved_mb = claimed - peak_mb + savings_pct = (saved_mb / claimed * 100) if claimed > 0 else 0 + lines.append(f" Peak memory: {peak_mb:8.2f} MB (claimed: {claimed:,} MB, saved: {saved_mb:.2f} MB, {savings_pct:.1f}%)") + else: + lines.append(f" Peak memory: {peak_mb:8.2f} MB") + lines.append(f" Status: {data['error_type']}") + + lines.append("\n" + "=" * 100) + + # Calculate statistics by test type + truncated_claimed = 0 + truncated_peak = 0 + truncated_count = 0 + + sparse_peak_total = 0 + sparse_count = 0 + + for size_key, tests in results.items(): + for test_name, data in tests.items(): + expected_outcome = data.get('expected_outcome', 'failure') + + if expected_outcome == 'failure': + # Truncated data test + claimed = data.get('claimed_mb', 0) + if claimed != 'N/A' and claimed > 0: + truncated_claimed += claimed + truncated_peak += data['peak_memory_mb'] + truncated_count += 1 + else: + # Sparse memo test + sparse_peak_total += data['peak_memory_mb'] + sparse_count += 1 + + lines.append("\nSummary:") + lines.append("") + + if truncated_count > 0: + avg_claimed = truncated_claimed / truncated_count + avg_peak = truncated_peak / truncated_count + avg_saved = avg_claimed - avg_peak + avg_savings_pct = (avg_saved / avg_claimed * 100) if avg_claimed > 0 else 0 + + lines.append(" Truncated Data Protection (chunked reading):") + lines.append(f" Average claimed: {avg_claimed:,.1f} MB") + lines.append(f" Average peak: {avg_peak:,.2f} MB") + lines.append(f" Average saved: {avg_saved:,.2f} MB ({avg_savings_pct:.1f}% reduction)") + lines.append(f" Status: ✓ Fails fast with minimal memory") + lines.append("") + + if sparse_count > 0: + avg_sparse_peak = sparse_peak_total / sparse_count + lines.append(" Sparse Memo Protection (dict-based memo):") + lines.append(f" Average peak: {avg_sparse_peak:,.2f} MB") + lines.append(f" Status: ✓ Succeeds with dict (vs GB-sized arrays without PR)") + lines.append(f" Note: Compare with --baseline to see actual memory 
savings") + + lines.append("") + lines.append("=" * 100) + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description="Benchmark pickle unpickling performance for large objects" + ) + parser.add_argument( + '--sizes', + type=int, + nargs='+', + default=None, + metavar='MiB', + help=f'Object sizes to test in MiB (default: {DEFAULT_SIZES_MIB})' + ) + parser.add_argument( + '--protocol', + type=int, + default=5, + choices=[0, 1, 2, 3, 4, 5], + help='Pickle protocol version (default: 5)' + ) + parser.add_argument( + '--iterations', + type=int, + default=3, + help='Number of benchmark iterations (default: 3)' + ) + parser.add_argument( + '--format', + choices=['text', 'markdown', 'json'], + default='text', + help='Output format (default: text)' + ) + parser.add_argument( + '--baseline', + type=str, + metavar='PYTHON', + help='Path to baseline Python interpreter for comparison (e.g., ../main-build/python)' + ) + parser.add_argument( + '--antagonistic', + action='store_true', + help='Run antagonistic/malicious pickle tests (DoS protection benchmark)' + ) + + args = parser.parse_args() + + # Handle antagonistic mode + if args.antagonistic: + # Antagonistic mode uses claimed sizes in MB, not actual data sizes + if args.sizes is None: + claimed_sizes_mb = AntagonisticTestSuite.DEFAULT_ANTAGONISTIC_SIZES_MB + else: + claimed_sizes_mb = args.sizes + + print(f"Running ANTAGONISTIC pickle benchmark (DoS protection test)...") + print(f"Claimed sizes: {claimed_sizes_mb} MiB (actual data: 1KB each)") + print(f"NOTE: These pickles will FAIL to unpickle (expected)") + print() + + # Run antagonistic benchmark suite + suite = AntagonisticTestSuite(claimed_sizes_mb) + results = suite.run_all_tests() + + # Format and display results + if args.baseline: + # Verify baseline Python exists + baseline_path = Path(args.baseline) + if not baseline_path.exists(): + print(f"Error: Baseline Python not found: {args.baseline}", file=sys.stderr) + return 1 + + # Run baseline benchmark + baseline_results = Comparator.run_baseline_benchmark(args.baseline, args) + + # Show comparison + comparison_output = Comparator.format_antagonistic_comparison(results, baseline_results) + print(comparison_output) + else: + # Format and display results + output = _format_output(results, args.format, is_antagonistic=True) + print(output) + + else: + # Normal mode: legitimate pickle benchmarks + # Convert sizes from MiB to bytes + if args.sizes is None: + sizes_bytes = DEFAULT_SIZES + else: + sizes_bytes = [size * (1 << 20) for size in args.sizes] + + print(f"Running pickle benchmark with protocol {args.protocol}...") + print(f"Test sizes: {[f'{s/(1<<20):.2f}MiB' for s in sizes_bytes]}") + print(f"Iterations per test: {args.iterations}") + print() + + # Run benchmark suite + suite = TestSuite(sizes_bytes, args.protocol, args.iterations) + results = suite.run_all_tests() + + # If baseline comparison requested, run baseline and compare + if args.baseline: + # Verify baseline Python exists + baseline_path = Path(args.baseline) + if not baseline_path.exists(): + print(f"Error: Baseline Python not found: {args.baseline}", file=sys.stderr) + return 1 + + # Run baseline benchmark + baseline_results = Comparator.run_baseline_benchmark(args.baseline, args) + + # Show comparison + comparison_output = Comparator.format_comparison(results, baseline_results) + print(comparison_output) + + else: + # Format and display results + output = _format_output(results, args.format, is_antagonistic=False) + print(output) + + return 0 + 
+ +if __name__ == '__main__': + sys.exit(main()) From 44dbe0338585169e0e22a6b7202fd2119fdf1cd4 Mon Sep 17 00:00:00 2001 From: "Gregory P. Smith" Date: Mon, 24 Nov 2025 02:21:38 +0000 Subject: [PATCH 18/19] fix docs build? --- .../next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst index 703fd15dc96483..4c4c65d45d78a4 100644 --- a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst +++ b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst @@ -5,5 +5,3 @@ allowed to execute is restricted by overriding the :meth:`~pickle.Unpickler.find_class` method. This could have led to symptoms including a :exc:`MemoryError`, swapping, out of memory (OOM) killed processes or containers, or even system crashes. - -.. note:: Regardless of improvements, :mod:`pickle` remains insecure for use on untrusted data. From 7afe4e1caa56f6e7ee7644c60e2a0bf64f01b9ef Mon Sep 17 00:00:00 2001 From: Serhiy Storchaka Date: Mon, 1 Dec 2025 15:09:16 +0200 Subject: [PATCH 19/19] Update comments. --- Lib/test/pickletester.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 83fa2f2e396384..4e3468bfcde9c3 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1164,7 +1164,14 @@ def test_negative_32b_binput(self): def test_too_large_put(self): # Test that PUT with large id does not cause allocation of - # too large memo table. + # too large memo table. The C implementation uses a dict-based memo + # for sparse indices (when idx > memo_len * 2) instead of allocating + # a massive array. This test verifies large sparse indices work without + # causing memory exhaustion. + # + # The following simple pickle creates an empty list, memoizes it + # using a large index, then loads it back on the stack, builds + # a tuple containing 2 identical empty lists and returns it. data = lambda n: (b'((lp' + str(n).encode() + b'\n' + b'g' + str(n).encode() + b'\nt.') # 0: ( MARK @@ -1186,7 +1193,9 @@ def test_too_large_long_binput(self): # a massive array. This test verifies large sparse indices work without # causing memory exhaustion. # - # If the threshold formula changes, ensure test indices still exceed it. + # The following simple pickle creates an empty list, memoizes it + # using a large index, then loads it back on the stack, builds + # a tuple containing 2 identical empty lists and returns it. data = lambda n: (b'(]r' + struct.pack('