/* Core extension modules are built-in on some platforms (e.g. Windows). */
#ifdef Py_BUILD_CORE
#define Py_BUILD_CORE_BUILTIN
#undef Py_BUILD_CORE
#endif
#include "Python.h"
#include "structmember.h"
PyDoc_STRVAR(pickle_module_doc,
"Optimized C implementation for the Python pickle module.");
/*[clinic input]
module _pickle
class _pickle.Pickler "PicklerObject *" "&Pickler_Type"
class _pickle.PicklerMemoProxy "PicklerMemoProxyObject *" "&PicklerMemoProxyType"
class _pickle.Unpickler "UnpicklerObject *" "&Unpickler_Type"
class _pickle.UnpicklerMemoProxy "UnpicklerMemoProxyObject *" "&UnpicklerMemoProxyType"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=4b3e113468a58e6c]*/
/* Bump HIGHEST_PROTOCOL when new opcodes are added to the pickle protocol.
Bump DEFAULT_PROTOCOL only when the oldest still supported version of Python
already includes it. */
enum {
HIGHEST_PROTOCOL = 4,
DEFAULT_PROTOCOL = 4
};
/* Pickle opcodes. These must be kept updated with pickle.py.
Extensive docs are in pickletools.py. */
enum opcode {
MARK = '(',
STOP = '.',
POP = '0',
POP_MARK = '1',
DUP = '2',
FLOAT = 'F',
INT = 'I',
BININT = 'J',
BININT1 = 'K',
LONG = 'L',
BININT2 = 'M',
NONE = 'N',
PERSID = 'P',
BINPERSID = 'Q',
REDUCE = 'R',
STRING = 'S',
BINSTRING = 'T',
SHORT_BINSTRING = 'U',
UNICODE = 'V',
BINUNICODE = 'X',
APPEND = 'a',
BUILD = 'b',
GLOBAL = 'c',
DICT = 'd',
EMPTY_DICT = '}',
APPENDS = 'e',
GET = 'g',
BINGET = 'h',
INST = 'i',
LONG_BINGET = 'j',
LIST = 'l',
EMPTY_LIST = ']',
OBJ = 'o',
PUT = 'p',
BINPUT = 'q',
LONG_BINPUT = 'r',
SETITEM = 's',
TUPLE = 't',
EMPTY_TUPLE = ')',
SETITEMS = 'u',
BINFLOAT = 'G',
/* Protocol 2. */
PROTO = '\x80',
NEWOBJ = '\x81',
EXT1 = '\x82',
EXT2 = '\x83',
EXT4 = '\x84',
TUPLE1 = '\x85',
TUPLE2 = '\x86',
TUPLE3 = '\x87',
NEWTRUE = '\x88',
NEWFALSE = '\x89',
LONG1 = '\x8a',
LONG4 = '\x8b',
/* Protocol 3 (Python 3.x) */
BINBYTES = 'B',
SHORT_BINBYTES = 'C',
/* Protocol 4 */
SHORT_BINUNICODE = '\x8c',
BINUNICODE8 = '\x8d',
BINBYTES8 = '\x8e',
EMPTY_SET = '\x8f',
ADDITEMS = '\x90',
FROZENSET = '\x91',
NEWOBJ_EX = '\x92',
STACK_GLOBAL = '\x93',
MEMOIZE = '\x94',
FRAME = '\x95'
};
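/* As an illustration, the protocol 2 pickle of the tuple (1, 2) is the
   opcode sequence PROTO 2, BININT1 1, BININT1 2, TUPLE2, BINPUT 0, STOP,
   i.e. the byte string b'\x80\x02K\x01K\x02\x86q\x00.'. pickletools.dis()
   prints any pickle stream opcode by opcode. */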
enum {
/* Keep in sync with pickle.Pickler._BATCHSIZE. This is how many elements
batch_list/dict() pumps out before doing APPENDS/SETITEMS. Nothing will
break if this gets out of sync with pickle.py, but it's unclear that it
would help anything either. */
BATCHSIZE = 1000,
/* Nesting limit until Pickler, when running in "fast mode", starts
checking for self-referential data structures. */
FAST_NESTING_LIMIT = 50,
/* Initial size of the write buffer of Pickler. */
WRITE_BUF_SIZE = 4096,
/* Prefetch size when unpickling (disabled on unpeekable streams) */
PREFETCH = 8192 * 16,
FRAME_SIZE_MIN = 4,
FRAME_SIZE_TARGET = 64 * 1024,
FRAME_HEADER_SIZE = 9
};
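/* A protocol 4 frame is the FRAME opcode followed by an 8-byte little-endian
   payload length, hence FRAME_HEADER_SIZE == 9. Frames whose payload is
   shorter than FRAME_SIZE_MIN are written without the header, and an open
   frame is committed once it reaches FRAME_SIZE_TARGET (see
   _Pickler_CommitFrame() and _Pickler_OpcodeBoundary() below). */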
/*************************************************************************/
/* State of the pickle module, per PEP 3121. */
typedef struct {
/* Exception classes for pickle. */
PyObject *PickleError;
PyObject *PicklingError;
PyObject *UnpicklingError;
/* copyreg.dispatch_table, {type_object: pickling_function} */
PyObject *dispatch_table;
/* For the extension opcodes EXT1, EXT2 and EXT4. */
/* copyreg._extension_registry, {(module_name, function_name): code} */
PyObject *extension_registry;
/* copyreg._extension_cache, {code: object} */
PyObject *extension_cache;
/* copyreg._inverted_registry, {code: (module_name, function_name)} */
PyObject *inverted_registry;
/* Import mappings for compatibility with Python 2.x */
/* _compat_pickle.NAME_MAPPING,
{(oldmodule, oldname): (newmodule, newname)} */
PyObject *name_mapping_2to3;
/* _compat_pickle.IMPORT_MAPPING, {oldmodule: newmodule} */
PyObject *import_mapping_2to3;
/* Same, but with REVERSE_NAME_MAPPING / REVERSE_IMPORT_MAPPING */
PyObject *name_mapping_3to2;
PyObject *import_mapping_3to2;
/* codecs.encode, used for saving bytes in older protocols */
PyObject *codecs_encode;
/* builtins.getattr, used for saving nested names with protocol < 4 */
PyObject *getattr;
/* functools.partial, used for implementing __newobj_ex__ with protocols
2 and 3 */
PyObject *partial;
} PickleState;
/* Forward declaration of the _pickle module definition. */
static struct PyModuleDef _picklemodule;
/* Given a module object, get its per-module state. */
static PickleState *
_Pickle_GetState(PyObject *module)
{
return (PickleState *)PyModule_GetState(module);
}
/* Find the module instance imported in the currently running sub-interpreter
and get its state. */
static PickleState *
_Pickle_GetGlobalState(void)
{
return _Pickle_GetState(PyState_FindModule(&_picklemodule));
}
/* Clear the given pickle module state. */
static void
_Pickle_ClearState(PickleState *st)
{
Py_CLEAR(st->PickleError);
Py_CLEAR(st->PicklingError);
Py_CLEAR(st->UnpicklingError);
Py_CLEAR(st->dispatch_table);
Py_CLEAR(st->extension_registry);
Py_CLEAR(st->extension_cache);
Py_CLEAR(st->inverted_registry);
Py_CLEAR(st->name_mapping_2to3);
Py_CLEAR(st->import_mapping_2to3);
Py_CLEAR(st->name_mapping_3to2);
Py_CLEAR(st->import_mapping_3to2);
Py_CLEAR(st->codecs_encode);
Py_CLEAR(st->getattr);
Py_CLEAR(st->partial);
}
/* Initialize the given pickle module state. */
static int
_Pickle_InitState(PickleState *st)
{
PyObject *builtins;
PyObject *copyreg = NULL;
PyObject *compat_pickle = NULL;
PyObject *codecs = NULL;
PyObject *functools = NULL;
builtins = PyEval_GetBuiltins();
if (builtins == NULL)
goto error;
st->getattr = PyDict_GetItemString(builtins, "getattr");
if (st->getattr == NULL)
goto error;
Py_INCREF(st->getattr);
copyreg = PyImport_ImportModule("copyreg");
if (!copyreg)
goto error;
st->dispatch_table = PyObject_GetAttrString(copyreg, "dispatch_table");
if (!st->dispatch_table)
goto error;
if (!PyDict_CheckExact(st->dispatch_table)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg.dispatch_table should be a dict, not %.200s",
Py_TYPE(st->dispatch_table)->tp_name);
goto error;
}
st->extension_registry = \
PyObject_GetAttrString(copyreg, "_extension_registry");
if (!st->extension_registry)
goto error;
if (!PyDict_CheckExact(st->extension_registry)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._extension_registry should be a dict, "
"not %.200s", Py_TYPE(st->extension_registry)->tp_name);
goto error;
}
st->inverted_registry = \
PyObject_GetAttrString(copyreg, "_inverted_registry");
if (!st->inverted_registry)
goto error;
if (!PyDict_CheckExact(st->inverted_registry)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._inverted_registry should be a dict, "
"not %.200s", Py_TYPE(st->inverted_registry)->tp_name);
goto error;
}
st->extension_cache = PyObject_GetAttrString(copyreg, "_extension_cache");
if (!st->extension_cache)
goto error;
if (!PyDict_CheckExact(st->extension_cache)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._extension_cache should be a dict, "
"not %.200s", Py_TYPE(st->extension_cache)->tp_name);
goto error;
}
Py_CLEAR(copyreg);
/* Load the 2.x -> 3.x stdlib module mapping tables */
compat_pickle = PyImport_ImportModule("_compat_pickle");
if (!compat_pickle)
goto error;
st->name_mapping_2to3 = \
PyObject_GetAttrString(compat_pickle, "NAME_MAPPING");
if (!st->name_mapping_2to3)
goto error;
if (!PyDict_CheckExact(st->name_mapping_2to3)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING should be a dict, not %.200s",
Py_TYPE(st->name_mapping_2to3)->tp_name);
goto error;
}
st->import_mapping_2to3 = \
PyObject_GetAttrString(compat_pickle, "IMPORT_MAPPING");
if (!st->import_mapping_2to3)
goto error;
if (!PyDict_CheckExact(st->import_mapping_2to3)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.IMPORT_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->import_mapping_2to3)->tp_name);
goto error;
}
/* ... and the 3.x -> 2.x mapping tables */
st->name_mapping_3to2 = \
PyObject_GetAttrString(compat_pickle, "REVERSE_NAME_MAPPING");
if (!st->name_mapping_3to2)
goto error;
if (!PyDict_CheckExact(st->name_mapping_3to2)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->name_mapping_3to2)->tp_name);
goto error;
}
st->import_mapping_3to2 = \
PyObject_GetAttrString(compat_pickle, "REVERSE_IMPORT_MAPPING");
if (!st->import_mapping_3to2)
goto error;
if (!PyDict_CheckExact(st->import_mapping_3to2)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_IMPORT_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->import_mapping_3to2)->tp_name);
goto error;
}
Py_CLEAR(compat_pickle);
codecs = PyImport_ImportModule("codecs");
if (codecs == NULL)
goto error;
st->codecs_encode = PyObject_GetAttrString(codecs, "encode");
if (st->codecs_encode == NULL) {
goto error;
}
if (!PyCallable_Check(st->codecs_encode)) {
PyErr_Format(PyExc_RuntimeError,
"codecs.encode should be a callable, not %.200s",
Py_TYPE(st->codecs_encode)->tp_name);
goto error;
}
Py_CLEAR(codecs);
functools = PyImport_ImportModule("functools");
if (!functools)
goto error;
st->partial = PyObject_GetAttrString(functools, "partial");
if (!st->partial)
goto error;
Py_CLEAR(functools);
return 0;
error:
Py_CLEAR(copyreg);
Py_CLEAR(compat_pickle);
Py_CLEAR(codecs);
Py_CLEAR(functools);
_Pickle_ClearState(st);
return -1;
}
/* Helper for calling a function with a single argument quickly.
This function steals the reference of the given argument. */
static PyObject *
_Pickle_FastCall(PyObject *func, PyObject *obj)
{
PyObject *result;
result = PyObject_CallFunctionObjArgs(func, obj, NULL);
Py_DECREF(obj);
return result;
}
/*************************************************************************/
/* Retrieve and deconstruct a method for avoiding a reference cycle
(pickler -> bound method of pickler -> pickler) */
static int
init_method_ref(PyObject *self, _Py_Identifier *name,
PyObject **method_func, PyObject **method_self)
{
PyObject *func, *func2;
int ret;
/* *method_func and *method_self should be consistent. All refcount
decrements should occur after setting *method_self and *method_func. */
ret = _PyObject_LookupAttrId(self, name, &func);
if (func == NULL) {
*method_self = NULL;
Py_CLEAR(*method_func);
return ret;
}
if (PyMethod_Check(func) && PyMethod_GET_SELF(func) == self) {
/* Deconstruct a bound Python method */
func2 = PyMethod_GET_FUNCTION(func);
Py_INCREF(func2);
*method_self = self; /* borrowed */
Py_XSETREF(*method_func, func2);
Py_DECREF(func);
return 0;
}
else {
*method_self = NULL;
Py_XSETREF(*method_func, func);
return 0;
}
}
/* Bind a method if it was deconstructed */
static PyObject *
reconstruct_method(PyObject *func, PyObject *self)
{
if (self) {
return PyMethod_New(func, self);
}
else {
Py_INCREF(func);
return func;
}
}
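/* Call the (func, self) pair produced by init_method_ref() with a single
   argument, re-inserting self as the first argument if the method was
   deconstructed. */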
static PyObject *
call_method(PyObject *func, PyObject *self, PyObject *obj)
{
if (self) {
return PyObject_CallFunctionObjArgs(func, self, obj, NULL);
}
else {
return PyObject_CallFunctionObjArgs(func, obj, NULL);
}
}
/*************************************************************************/
/* Internal data type used as the unpickling stack. */
typedef struct {
PyObject_VAR_HEAD
PyObject **data;
int mark_set; /* is MARK set? */
Py_ssize_t fence; /* position of top MARK or 0 */
Py_ssize_t allocated; /* number of slots in data allocated */
} Pdata;
static void
Pdata_dealloc(Pdata *self)
{
Py_ssize_t i = Py_SIZE(self);
while (--i >= 0) {
Py_DECREF(self->data[i]);
}
PyMem_FREE(self->data);
PyObject_Del(self);
}
static PyTypeObject Pdata_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Pdata", /*tp_name*/
sizeof(Pdata), /*tp_basicsize*/
sizeof(PyObject *), /*tp_itemsize*/
(destructor)Pdata_dealloc, /*tp_dealloc*/
};
static PyObject *
Pdata_New(void)
{
Pdata *self;
if (!(self = PyObject_New(Pdata, &Pdata_Type)))
return NULL;
Py_SIZE(self) = 0;
self->mark_set = 0;
self->fence = 0;
self->allocated = 8;
self->data = PyMem_MALLOC(self->allocated * sizeof(PyObject *));
if (self->data)
return (PyObject *)self;
Py_DECREF(self);
return PyErr_NoMemory();
}
/* Retain only the initial clearto items. If clearto >= the current
* number of items, this is a (non-erroneous) NOP.
*/
static int
Pdata_clear(Pdata *self, Py_ssize_t clearto)
{
Py_ssize_t i = Py_SIZE(self);
assert(clearto >= self->fence);
if (clearto >= i)
return 0;
while (--i >= clearto) {
Py_CLEAR(self->data[i]);
}
Py_SIZE(self) = clearto;
return 0;
}
static int
Pdata_grow(Pdata *self)
{
PyObject **data = self->data;
size_t allocated = (size_t)self->allocated;
size_t new_allocated;
new_allocated = (allocated >> 3) + 6;
/* check for integer overflow */
if (new_allocated > (size_t)PY_SSIZE_T_MAX - allocated)
goto nomemory;
new_allocated += allocated;
PyMem_RESIZE(data, PyObject *, new_allocated);
if (data == NULL)
goto nomemory;
self->data = data;
self->allocated = (Py_ssize_t)new_allocated;
return 0;
nomemory:
PyErr_NoMemory();
return -1;
}
static int
Pdata_stack_underflow(Pdata *self)
{
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
self->mark_set ?
"unexpected MARK found" :
"unpickling stack underflow");
return -1;
}
/* D is a Pdata*. Pop the topmost element and store it into V, which
* must be an lvalue holding PyObject*. On stack underflow, UnpicklingError
* is raised and V is set to NULL.
*/
static PyObject *
Pdata_pop(Pdata *self)
{
if (Py_SIZE(self) <= self->fence) {
Pdata_stack_underflow(self);
return NULL;
}
return self->data[--Py_SIZE(self)];
}
#define PDATA_POP(D, V) do { (V) = Pdata_pop((D)); } while (0)
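/* Append obj to the stack, growing the data array if necessary. The
   reference to obj is stolen. Returns 0 on success, -1 (with an exception
   set) on failure. */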
static int
Pdata_push(Pdata *self, PyObject *obj)
{
if (Py_SIZE(self) == self->allocated && Pdata_grow(self) < 0) {
return -1;
}
self->data[Py_SIZE(self)++] = obj;
return 0;
}
/* Push an object on stack, transferring its ownership to the stack. */
#define PDATA_PUSH(D, O, ER) do { \
if (Pdata_push((D), (O)) < 0) return (ER); } while(0)
/* Push an object on stack, adding a new reference to the object. */
#define PDATA_APPEND(D, O, ER) do { \
Py_INCREF((O)); \
if (Pdata_push((D), (O)) < 0) return (ER); } while(0)
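/* Pop every object at index >= start off the stack and return them, in
   order, as a new tuple. The references are transferred to the tuple. */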
static PyObject *
Pdata_poptuple(Pdata *self, Py_ssize_t start)
{
PyObject *tuple;
Py_ssize_t len, i, j;
if (start < self->fence) {
Pdata_stack_underflow(self);
return NULL;
}
len = Py_SIZE(self) - start;
tuple = PyTuple_New(len);
if (tuple == NULL)
return NULL;
for (i = start, j = 0; j < len; i++, j++)
PyTuple_SET_ITEM(tuple, j, self->data[i]);
Py_SIZE(self) = start;
return tuple;
}
static PyObject *
Pdata_poplist(Pdata *self, Py_ssize_t start)
{
PyObject *list;
Py_ssize_t len, i, j;
len = Py_SIZE(self) - start;
list = PyList_New(len);
if (list == NULL)
return NULL;
for (i = start, j = 0; j < len; i++, j++)
PyList_SET_ITEM(list, j, self->data[i]);
Py_SIZE(self) = start;
return list;
}
typedef struct {
PyObject *me_key;
Py_ssize_t me_value;
} PyMemoEntry;
typedef struct {
Py_ssize_t mt_mask;
Py_ssize_t mt_used;
Py_ssize_t mt_allocated;
PyMemoEntry *mt_table;
} PyMemoTable;
typedef struct PicklerObject {
PyObject_HEAD
PyMemoTable *memo; /* Memo table, keep track of the seen
objects to support self-referential objects
pickling. */
PyObject *pers_func; /* persistent_id() method, can be NULL */
PyObject *pers_func_self; /* borrowed reference to self if pers_func
is an unbound method, NULL otherwise */
PyObject *dispatch_table; /* private dispatch_table, can be NULL */
PyObject *write; /* write() method of the output stream. */
PyObject *output_buffer; /* Write into a local bytearray buffer before
flushing to the stream. */
Py_ssize_t output_len; /* Length of output_buffer. */
Py_ssize_t max_output_len; /* Allocation size of output_buffer. */
int proto; /* Pickle protocol number, >= 0 */
int bin; /* Boolean, true if proto > 0 */
int framing; /* True when framing is enabled, proto >= 4 */
Py_ssize_t frame_start; /* Position in output_buffer where the
current frame begins. -1 if there
is no frame currently open. */
Py_ssize_t buf_size; /* Size of the current buffered pickle data */
int fast; /* Enable fast mode if set to a true value.
Fast mode disables use of the memo,
speeding up pickling by not generating
superfluous PUT opcodes. It should not
be used with self-referential objects. */
int fast_nesting;
int fix_imports; /* Indicate whether Pickler should fix
the name of globals for Python 2.x. */
PyObject *fast_memo;
} PicklerObject;
typedef struct UnpicklerObject {
PyObject_HEAD
Pdata *stack; /* Pickle data stack, store unpickled objects. */
/* The unpickler memo is just an array of PyObject *s. Using a dict
is unnecessary, since the keys are contiguous ints. */
PyObject **memo;
Py_ssize_t memo_size; /* Capacity of the memo array */
Py_ssize_t memo_len; /* Number of objects in the memo */
PyObject *pers_func; /* persistent_load() method, can be NULL. */
PyObject *pers_func_self; /* borrowed reference to self if pers_func
is an unbound method, NULL otherwise */
Py_buffer buffer;
char *input_buffer;
char *input_line;
Py_ssize_t input_len;
Py_ssize_t next_read_idx;
Py_ssize_t prefetched_idx; /* index of first prefetched byte */
PyObject *read; /* read() method of the input stream. */
PyObject *readline; /* readline() method of the input stream. */
PyObject *peek; /* peek() method of the input stream, or NULL */
char *encoding; /* Name of the encoding to be used for
decoding strings pickled using Python
2.x. The default value is "ASCII" */
char *errors; /* Name of the error handling scheme to be used
when decoding strings. The default value
is "strict". */
Py_ssize_t *marks; /* Mark stack, used for unpickling container
objects. */
Py_ssize_t num_marks; /* Number of marks in the mark stack. */
Py_ssize_t marks_size; /* Current allocated size of the mark stack. */
int proto; /* Protocol of the pickle loaded. */
int fix_imports; /* Indicate whether Unpickler should fix
the name of globals pickled by Python 2.x. */
} UnpicklerObject;
typedef struct {
PyObject_HEAD
PicklerObject *pickler; /* Pickler whose memo table we're proxying. */
} PicklerMemoProxyObject;
typedef struct {
PyObject_HEAD
UnpicklerObject *unpickler;
} UnpicklerMemoProxyObject;
/* Forward declarations */
static int save(PicklerObject *, PyObject *, int);
static int save_reduce(PicklerObject *, PyObject *, PyObject *);
static PyTypeObject Pickler_Type;
static PyTypeObject Unpickler_Type;
#include "clinic/_pickle.c.h"
/*************************************************************************
A custom hashtable mapping void* to Python ints. This is used by the pickler
for memoization. Using a custom hashtable rather than PyDict allows us to skip
a bunch of unnecessary object creation. This makes a huge performance
difference. */
#define MT_MINSIZE 8
#define PERTURB_SHIFT 5
static PyMemoTable *
PyMemoTable_New(void)
{
PyMemoTable *memo = PyMem_MALLOC(sizeof(PyMemoTable));
if (memo == NULL) {
PyErr_NoMemory();
return NULL;
}
memo->mt_used = 0;
memo->mt_allocated = MT_MINSIZE;
memo->mt_mask = MT_MINSIZE - 1;
memo->mt_table = PyMem_MALLOC(MT_MINSIZE * sizeof(PyMemoEntry));
if (memo->mt_table == NULL) {
PyMem_FREE(memo);
PyErr_NoMemory();
return NULL;
}
memset(memo->mt_table, 0, MT_MINSIZE * sizeof(PyMemoEntry));
return memo;
}
static PyMemoTable *
PyMemoTable_Copy(PyMemoTable *self)
{
Py_ssize_t i;
PyMemoTable *new = PyMemoTable_New();
if (new == NULL)
return NULL;
new->mt_used = self->mt_used;
new->mt_allocated = self->mt_allocated;
new->mt_mask = self->mt_mask;
/* The table we get from _New() is probably smaller than we wanted.
Free it and allocate one that's the right size. */
PyMem_FREE(new->mt_table);
new->mt_table = PyMem_NEW(PyMemoEntry, self->mt_allocated);
if (new->mt_table == NULL) {
PyMem_FREE(new);
PyErr_NoMemory();
return NULL;
}
for (i = 0; i < self->mt_allocated; i++) {
Py_XINCREF(self->mt_table[i].me_key);
}
memcpy(new->mt_table, self->mt_table,
sizeof(PyMemoEntry) * self->mt_allocated);
return new;
}
static Py_ssize_t
PyMemoTable_Size(PyMemoTable *self)
{
return self->mt_used;
}
static int
PyMemoTable_Clear(PyMemoTable *self)
{
Py_ssize_t i = self->mt_allocated;
while (--i >= 0) {
Py_XDECREF(self->mt_table[i].me_key);
}
self->mt_used = 0;
memset(self->mt_table, 0, self->mt_allocated * sizeof(PyMemoEntry));
return 0;
}
static void
PyMemoTable_Del(PyMemoTable *self)
{
if (self == NULL)
return;
PyMemoTable_Clear(self);
PyMem_FREE(self->mt_table);
PyMem_FREE(self);
}
/* Since entries cannot be deleted from this hashtable, _PyMemoTable_Lookup()
can be considerably simpler than dictobject.c's lookdict(). */
static PyMemoEntry *
_PyMemoTable_Lookup(PyMemoTable *self, PyObject *key)
{
size_t i;
size_t perturb;
size_t mask = (size_t)self->mt_mask;
PyMemoEntry *table = self->mt_table;
PyMemoEntry *entry;
Py_hash_t hash = (Py_hash_t)key >> 3;
i = hash & mask;
entry = &table[i];
if (entry->me_key == NULL || entry->me_key == key)
return entry;
for (perturb = hash; ; perturb >>= PERTURB_SHIFT) {
i = (i << 2) + i + perturb + 1;
entry = &table[i & mask];
if (entry->me_key == NULL || entry->me_key == key)
return entry;
}
Py_UNREACHABLE();
}
/* Returns -1 on failure, 0 on success. */
static int
_PyMemoTable_ResizeTable(PyMemoTable *self, Py_ssize_t min_size)
{
PyMemoEntry *oldtable = NULL;
PyMemoEntry *oldentry, *newentry;
Py_ssize_t new_size = MT_MINSIZE;
Py_ssize_t to_process;
assert(min_size > 0);
/* Find the smallest valid table size >= min_size. */
while (new_size < min_size && new_size > 0)
new_size <<= 1;
if (new_size <= 0) {
PyErr_NoMemory();
return -1;
}
/* new_size needs to be a power of two. */
assert((new_size & (new_size - 1)) == 0);
/* Allocate new table. */
oldtable = self->mt_table;
self->mt_table = PyMem_NEW(PyMemoEntry, new_size);
if (self->mt_table == NULL) {
self->mt_table = oldtable;
PyErr_NoMemory();
return -1;
}
self->mt_allocated = new_size;
self->mt_mask = new_size - 1;
memset(self->mt_table, 0, sizeof(PyMemoEntry) * new_size);
/* Copy entries from the old table. */
to_process = self->mt_used;
for (oldentry = oldtable; to_process > 0; oldentry++) {
if (oldentry->me_key != NULL) {
to_process--;
/* newentry is a pointer to a chunk of the new
mt_table, so we're setting the key:value pair
in-place. */
newentry = _PyMemoTable_Lookup(self, oldentry->me_key);
newentry->me_key = oldentry->me_key;
newentry->me_value = oldentry->me_value;
}
}
/* Deallocate the old table. */
PyMem_FREE(oldtable);
return 0;
}
/* Returns NULL on failure, a pointer to the value otherwise. */
static Py_ssize_t *
PyMemoTable_Get(PyMemoTable *self, PyObject *key)
{
PyMemoEntry *entry = _PyMemoTable_Lookup(self, key);
if (entry->me_key == NULL)
return NULL;
return &entry->me_value;
}
/* Returns -1 on failure, 0 on success. */
static int
PyMemoTable_Set(PyMemoTable *self, PyObject *key, Py_ssize_t value)
{
PyMemoEntry *entry;
assert(key != NULL);
entry = _PyMemoTable_Lookup(self, key);
if (entry->me_key != NULL) {
entry->me_value = value;
return 0;
}
Py_INCREF(key);
entry->me_key = key;
entry->me_value = value;
self->mt_used++;
/* If we added a key, we can safely resize. Otherwise just return!
* If used >= 2/3 size, adjust size. Normally, this quadruples the size.
*
* Quadrupling the size improves average table sparseness
* (reducing collisions) at the cost of some memory. It also halves
* the number of expensive resize operations in a growing memo table.
*
* Very large memo tables (over 50K items) use doubling instead.
* This may help applications with severe memory constraints.
*/
if (!(self->mt_used * 3 >= (self->mt_mask + 1) * 2))
return 0;
return _PyMemoTable_ResizeTable(self,
(self->mt_used > 50000 ? 2 : 4) * self->mt_used);
}
#undef MT_MINSIZE
#undef PERTURB_SHIFT
/*************************************************************************/
static int
_Pickler_ClearBuffer(PicklerObject *self)
{
Py_XSETREF(self->output_buffer,
PyBytes_FromStringAndSize(NULL, self->max_output_len));
if (self->output_buffer == NULL)
return -1;
self->output_len = 0;
self->frame_start = -1;
return 0;
}
static void
_write_size64(char *out, size_t value)
{
size_t i;
Py_BUILD_ASSERT(sizeof(size_t) <= 8);
for (i = 0; i < sizeof(size_t); i++) {
out[i] = (unsigned char)((value >> (8 * i)) & 0xff);
}
for (i = sizeof(size_t); i < 8; i++) {
out[i] = 0;
}
}
static int
_Pickler_CommitFrame(PicklerObject *self)
{
size_t frame_len;
char *qdata;
if (!self->framing || self->frame_start == -1)
return 0;
frame_len = self->output_len - self->frame_start - FRAME_HEADER_SIZE;
qdata = PyBytes_AS_STRING(self->output_buffer) + self->frame_start;
if (frame_len >= FRAME_SIZE_MIN) {
qdata[0] = FRAME;
_write_size64(qdata + 1, frame_len);
}
else {
memmove(qdata, qdata + FRAME_HEADER_SIZE, frame_len);
self->output_len -= FRAME_HEADER_SIZE;
}
self->frame_start = -1;
return 0;
}
static PyObject *
_Pickler_GetString(PicklerObject *self)
{
PyObject *output_buffer = self->output_buffer;
assert(self->output_buffer != NULL);
if (_Pickler_CommitFrame(self))
return NULL;
self->output_buffer = NULL;
/* Resize down to exact size */
if (_PyBytes_Resize(&output_buffer, self->output_len) < 0)
return NULL;
return output_buffer;
}
static int
_Pickler_FlushToFile(PicklerObject *self)
{
PyObject *output, *result;
assert(self->write != NULL);
/* This will commit the frame first */
output = _Pickler_GetString(self);
if (output == NULL)
return -1;
result = _Pickle_FastCall(self->write, output);
Py_XDECREF(result);
return (result == NULL) ? -1 : 0;
}
static int
_Pickler_OpcodeBoundary(PicklerObject *self)
{
Py_ssize_t frame_len;
if (!self->framing || self->frame_start == -1) {
return 0;
}
frame_len = self->output_len - self->frame_start - FRAME_HEADER_SIZE;
if (frame_len >= FRAME_SIZE_TARGET) {
if(_Pickler_CommitFrame(self)) {
return -1;
}
/* Flush the content of the committed frame to the underlying
* file and reuse the pickler buffer for the next frame so as
* to limit memory usage when dumping large complex objects to
* a file.
*
* self->write is NULL when called via dumps.
*/
if (self->write != NULL) {
if (_Pickler_FlushToFile(self) < 0) {
return -1;
}
if (_Pickler_ClearBuffer(self) < 0) {
return -1;
}
}
}
return 0;
}
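/* Append data_len bytes from s to the output buffer, enlarging it as needed.
   When framing is enabled and no frame is open, space for a new frame header
   is reserved first. Returns data_len on success, -1 (with an exception set)
   on failure. */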
static Py_ssize_t
_Pickler_Write(PicklerObject *self, const char *s, Py_ssize_t data_len)
{
Py_ssize_t i, n, required;
char *buffer;
int need_new_frame;
assert(s != NULL);
need_new_frame = (self->framing && self->frame_start == -1);
if (need_new_frame)
n = data_len + FRAME_HEADER_SIZE;
else
n = data_len;
required = self->output_len + n;
if (required > self->max_output_len) {
/* Make place in buffer for the pickle chunk */
if (self->output_len >= PY_SSIZE_T_MAX / 2 - n) {
PyErr_NoMemory();
return -1;
}
self->max_output_len = (self->output_len + n) / 2 * 3;
if (_PyBytes_Resize(&self->output_buffer, self->max_output_len) < 0)
return -1;
}
buffer = PyBytes_AS_STRING(self->output_buffer);
if (need_new_frame) {
/* Setup new frame */
Py_ssize_t frame_start = self->output_len;
self->frame_start = frame_start;
for (i = 0; i < FRAME_HEADER_SIZE; i++) {
/* Write an invalid value, for debugging */
buffer[frame_start + i] = 0xFE;
}
self->output_len += FRAME_HEADER_SIZE;
}
if (data_len < 8) {
/* This is faster than memcpy when the string is short. */
for (i = 0; i < data_len; i++) {
buffer[self->output_len + i] = s[i];
}
}
else {
memcpy(buffer + self->output_len, s, data_len);
}
self->output_len += data_len;
return data_len;
}
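/* Allocate a PicklerObject with an empty memo table and a fresh output
   buffer of WRITE_BUF_SIZE bytes. Returns NULL (with an exception set) on
   failure. */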
static PicklerObject *
_Pickler_New(void)
{
PicklerObject *self;
self = PyObject_GC_New(PicklerObject, &Pickler_Type);
if (self == NULL)
return NULL;
self->pers_func = NULL;
self->dispatch_table = NULL;
self->write = NULL;
self->proto = 0;
self->bin = 0;
self->framing = 0;
self->frame_start = -1;
self->fast = 0;
self->fast_nesting = 0;
self->fix_imports = 0;
self->fast_memo = NULL;
self->max_output_len = WRITE_BUF_SIZE;
self->output_len = 0;
self->memo = PyMemoTable_New();
self->output_buffer = PyBytes_FromStringAndSize(NULL,
self->max_output_len);
if (self->memo == NULL || self->output_buffer == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
static int
_Pickler_SetProtocol(PicklerObject *self, PyObject *protocol, int fix_imports)
{
long proto;
if (protocol == NULL || protocol == Py_None) {
proto = DEFAULT_PROTOCOL;
}
else {
proto = PyLong_AsLong(protocol);
if (proto < 0) {
if (proto == -1 && PyErr_Occurred())
return -1;
proto = HIGHEST_PROTOCOL;
}
else if (proto > HIGHEST_PROTOCOL) {
PyErr_Format(PyExc_ValueError, "pickle protocol must be <= %d",
HIGHEST_PROTOCOL);
return -1;
}
}
self->proto = (int)proto;
self->bin = proto > 0;
self->fix_imports = fix_imports && proto < 3;
return 0;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Pickler. */
static int
_Pickler_SetOutputStream(PicklerObject *self, PyObject *file)
{
_Py_IDENTIFIER(write);
assert(file != NULL);
if (_PyObject_LookupAttrId(file, &PyId_write, &self->write) < 0) {
return -1;
}
if (self->write == NULL) {
PyErr_SetString(PyExc_TypeError,
"file must have a 'write' attribute");
return -1;
}
return 0;
}
/* Returns the size of the input on success, -1 on failure. This takes its
own reference to `input`. */
static Py_ssize_t
_Unpickler_SetStringInput(UnpicklerObject *self, PyObject *input)
{
if (self->buffer.buf != NULL)
PyBuffer_Release(&self->buffer);
if (PyObject_GetBuffer(input, &self->buffer, PyBUF_CONTIG_RO) < 0)
return -1;
self->input_buffer = self->buffer.buf;
self->input_len = self->buffer.len;
self->next_read_idx = 0;
self->prefetched_idx = self->input_len;
return self->input_len;
}
static int
bad_readline(void)
{
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "pickle data was truncated");
return -1;
}
static int
_Unpickler_SkipConsumed(UnpicklerObject *self)
{
Py_ssize_t consumed;
PyObject *r;
consumed = self->next_read_idx - self->prefetched_idx;
if (consumed <= 0)
return 0;
assert(self->peek); /* otherwise we did something wrong */
/* This makes a useless copy... */
r = PyObject_CallFunction(self->read, "n", consumed);
if (r == NULL)
return -1;
Py_DECREF(r);
self->prefetched_idx = self->next_read_idx;
return 0;
}
static const Py_ssize_t READ_WHOLE_LINE = -1;
/* If reading from a file, we need to only pull the bytes we need, since there
may be multiple pickle objects arranged contiguously in the same input
buffer.
If `n` is READ_WHOLE_LINE, read a whole line. Otherwise, read up to `n`
bytes from the input stream/buffer.
Update the unpickler's input buffer with the newly-read data. Returns -1 on
failure; on success, returns the number of bytes read from the file.
On success, self->input_len will be 0; this is intentional so that when
unpickling from a file, the "we've run out of data" code paths will trigger,
causing the Unpickler to go back to the file for more data. Use the returned
size to tell you how much data you can process. */
static Py_ssize_t
_Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n)
{
PyObject *data;
Py_ssize_t read_size;
assert(self->read != NULL);
if (_Unpickler_SkipConsumed(self) < 0)
return -1;
if (n == READ_WHOLE_LINE) {
data = _PyObject_CallNoArg(self->readline);
}
else {
PyObject *len;
/* Prefetch some data without advancing the file pointer, if possible */
if (self->peek && n < PREFETCH) {
len = PyLong_FromSsize_t(PREFETCH);
if (len == NULL)
return -1;
data = _Pickle_FastCall(self->peek, len);
if (data == NULL) {
if (!PyErr_ExceptionMatches(PyExc_NotImplementedError))
return -1;
/* peek() is probably not supported by the given file object */
PyErr_Clear();
Py_CLEAR(self->peek);
}
else {
read_size = _Unpickler_SetStringInput(self, data);
Py_DECREF(data);
self->prefetched_idx = 0;
if (n <= read_size)
return n;
}
}
len = PyLong_FromSsize_t(n);
if (len == NULL)
return -1;
data = _Pickle_FastCall(self->read, len);
}
if (data == NULL)
return -1;
read_size = _Unpickler_SetStringInput(self, data);
Py_DECREF(data);
return read_size;
}
/* Don't call it directly: use _Unpickler_Read() */
static Py_ssize_t
_Unpickler_ReadImpl(UnpicklerObject *self, char **s, Py_ssize_t n)
{
Py_ssize_t num_read;
*s = NULL;
if (self->next_read_idx > PY_SSIZE_T_MAX - n) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"read would overflow (invalid bytecode)");
return -1;
}
/* This case is handled by the _Unpickler_Read() macro for efficiency */
assert(self->next_read_idx + n > self->input_len);
if (!self->read)
return bad_readline();
num_read = _Unpickler_ReadFromFile(self, n);
if (num_read < 0)
return -1;
if (num_read < n)
return bad_readline();
*s = self->input_buffer;
self->next_read_idx = n;
return n;
}
/* Read `n` bytes from the unpickler's data source, storing the result in `*s`.
This should be used for all data reads, rather than accessing the unpickler's
input buffer directly. This method deals correctly with reading from input
streams, which the input buffer doesn't deal with.
Note that when reading from a file-like object, self->next_read_idx won't
be updated (it should remain at 0 for the entire unpickling process). You
should use this function's return value to know how many bytes you can
consume.
Returns -1 (with an exception set) on failure. On success, return the
number of chars read. */
#define _Unpickler_Read(self, s, n) \
(((n) <= (self)->input_len - (self)->next_read_idx) \
? (*(s) = (self)->input_buffer + (self)->next_read_idx, \
(self)->next_read_idx += (n), \
(n)) \
: _Unpickler_ReadImpl(self, (s), (n)))
static Py_ssize_t
_Unpickler_CopyLine(UnpicklerObject *self, char *line, Py_ssize_t len,
char **result)
{
char *input_line = PyMem_Realloc(self->input_line, len + 1);
if (input_line == NULL) {
PyErr_NoMemory();
return -1;
}
memcpy(input_line, line, len);
input_line[len] = '\0';
self->input_line = input_line;
*result = self->input_line;
return len;
}
/* Read a line from the input stream/buffer. If we run off the end of the input
before hitting \n, raise an error.
Returns the number of chars read, or -1 on failure. */
static Py_ssize_t
_Unpickler_Readline(UnpicklerObject *self, char **result)
{
Py_ssize_t i, num_read;
for (i = self->next_read_idx; i < self->input_len; i++) {
if (self->input_buffer[i] == '\n') {
char *line_start = self->input_buffer + self->next_read_idx;
num_read = i - self->next_read_idx + 1;
self->next_read_idx = i + 1;
return _Unpickler_CopyLine(self, line_start, num_read, result);
}
}
if (!self->read)
return bad_readline();
num_read = _Unpickler_ReadFromFile(self, READ_WHOLE_LINE);
if (num_read < 0)
return -1;
if (num_read == 0 || self->input_buffer[num_read - 1] != '\n')
return bad_readline();
self->next_read_idx = num_read;
return _Unpickler_CopyLine(self, self->input_buffer, num_read, result);
}
/* Returns -1 (with an exception set) on failure, 0 on success. The memo array
will be modified in place. */
static int
_Unpickler_ResizeMemoList(UnpicklerObject *self, Py_ssize_t new_size)
{
Py_ssize_t i;
assert(new_size > self->memo_size);
PyMem_RESIZE(self->memo, PyObject *, new_size);
if (self->memo == NULL) {
PyErr_NoMemory();
return -1;
}
for (i = self->memo_size; i < new_size; i++)
self->memo[i] = NULL;
self->memo_size = new_size;
return 0;
}
/* Returns NULL if idx is out of bounds. */
static PyObject *
_Unpickler_MemoGet(UnpicklerObject *self, Py_ssize_t idx)
{
if (idx < 0 || idx >= self->memo_size)
return NULL;
return self->memo[idx];
}
/* Returns -1 (with an exception set) on failure, 0 on success.
This takes its own reference to `value`. */
static int
_Unpickler_MemoPut(UnpicklerObject *self, Py_ssize_t idx, PyObject *value)
{
PyObject *old_item;
if (idx >= self->memo_size) {
if (_Unpickler_ResizeMemoList(self, idx * 2) < 0)
return -1;
assert(idx < self->memo_size);
}
Py_INCREF(value);
old_item = self->memo[idx];
self->memo[idx] = value;
if (old_item != NULL) {
Py_DECREF(old_item);
}
else {
self->memo_len++;
}
return 0;
}
static PyObject **
_Unpickler_NewMemo(Py_ssize_t new_size)
{
PyObject **memo = PyMem_NEW(PyObject *, new_size);
if (memo == NULL) {
PyErr_NoMemory();
return NULL;
}
memset(memo, 0, new_size * sizeof(PyObject *));
return memo;
}
/* Free the unpickler's memo, taking care to decref any items left in it. */
static void
_Unpickler_MemoCleanup(UnpicklerObject *self)
{
Py_ssize_t i;
PyObject **memo = self->memo;
if (self->memo == NULL)
return;
self->memo = NULL;
i = self->memo_size;
while (--i >= 0) {
Py_XDECREF(memo[i]);
}
PyMem_FREE(memo);
}
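/* Allocate an UnpicklerObject with an empty 32-slot memo and a fresh Pdata
   stack. Returns NULL (with an exception set) on failure. */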
static UnpicklerObject *
_Unpickler_New(void)
{
UnpicklerObject *self;
self = PyObject_GC_New(UnpicklerObject, &Unpickler_Type);
if (self == NULL)
return NULL;
self->pers_func = NULL;
self->input_buffer = NULL;
self->input_line = NULL;
self->input_len = 0;
self->next_read_idx = 0;
self->prefetched_idx = 0;
self->read = NULL;
self->readline = NULL;
self->peek = NULL;
self->encoding = NULL;
self->errors = NULL;
self->marks = NULL;
self->num_marks = 0;
self->marks_size = 0;
self->proto = 0;
self->fix_imports = 0;
memset(&self->buffer, 0, sizeof(Py_buffer));
self->memo_size = 32;
self->memo_len = 0;
self->memo = _Unpickler_NewMemo(self->memo_size);
self->stack = (Pdata *)Pdata_New();
if (self->memo == NULL || self->stack == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Unpickler. */
static int
_Unpickler_SetInputStream(UnpicklerObject *self, PyObject *file)
{
_Py_IDENTIFIER(peek);
_Py_IDENTIFIER(read);
_Py_IDENTIFIER(readline);
if (_PyObject_LookupAttrId(file, &PyId_peek, &self->peek) < 0) {
return -1;
}
(void)_PyObject_LookupAttrId(file, &PyId_read, &self->read);
(void)_PyObject_LookupAttrId(file, &PyId_readline, &self->readline);
if (self->readline == NULL || self->read == NULL) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"file must have 'read' and 'readline' attributes");
}
Py_CLEAR(self->read);
Py_CLEAR(self->readline);
Py_CLEAR(self->peek);
return -1;
}
return 0;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Unpickler. */
static int
_Unpickler_SetInputEncoding(UnpicklerObject *self,
const char *encoding,
const char *errors)
{
if (encoding == NULL)
encoding = "ASCII";
if (errors == NULL)
errors = "strict";
self->encoding = _PyMem_Strdup(encoding);
self->errors = _PyMem_Strdup(errors);
if (self->encoding == NULL || self->errors == NULL) {
PyErr_NoMemory();
return -1;
}
return 0;
}
/* Generate a GET opcode for an object stored in the memo. */
static int
memo_get(PicklerObject *self, PyObject *key)
{
Py_ssize_t *value;
char pdata[30];
Py_ssize_t len;
value = PyMemoTable_Get(self->memo, key);
if (value == NULL) {
PyErr_SetObject(PyExc_KeyError, key);
return -1;
}
if (!self->bin) {
pdata[0] = GET;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", *value);
len = strlen(pdata);
}
else {
if (*value < 256) {
pdata[0] = BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
len = 2;
}
else if ((size_t)*value <= 0xffffffffUL) {
pdata[0] = LONG_BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
pdata[2] = (unsigned char)((*value >> 8) & 0xff);
pdata[3] = (unsigned char)((*value >> 16) & 0xff);
pdata[4] = (unsigned char)((*value >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINGET");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
/* Store an object in the memo, assign it a new unique ID based on the number
of objects currently stored in the memo and generate a PUT opcode. */
static int
memo_put(PicklerObject *self, PyObject *obj)
{
char pdata[30];
Py_ssize_t len;
Py_ssize_t idx;
const char memoize_op = MEMOIZE;
if (self->fast)
return 0;
idx = PyMemoTable_Size(self->memo);
if (PyMemoTable_Set(self->memo, obj, idx) < 0)
return -1;
if (self->proto >= 4) {
if (_Pickler_Write(self, &memoize_op, 1) < 0)
return -1;
return 0;
}
else if (!self->bin) {
pdata[0] = PUT;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", idx);
len = strlen(pdata);
}
else {
if (idx < 256) {
pdata[0] = BINPUT;
pdata[1] = (unsigned char)idx;
len = 2;
}
else if ((size_t)idx <= 0xffffffffUL) {
pdata[0] = LONG_BINPUT;
pdata[1] = (unsigned char)(idx & 0xff);
pdata[2] = (unsigned char)((idx >> 8) & 0xff);
pdata[3] = (unsigned char)((idx >> 16) & 0xff);
pdata[4] = (unsigned char)((idx >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINPUT");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
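/* Split a (possibly dotted) qualified name into a list of its components.
   Names containing "<locals>" are rejected with AttributeError, since local
   objects cannot be pickled by reference. */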
static PyObject *
get_dotted_path(PyObject *obj, PyObject *name)
{
_Py_static_string(PyId_dot, ".");
PyObject *dotted_path;
Py_ssize_t i, n;
dotted_path = PyUnicode_Split(name, _PyUnicode_FromId(&PyId_dot), -1);
if (dotted_path == NULL)
return NULL;
n = PyList_GET_SIZE(dotted_path);
assert(n >= 1);
for (i = 0; i < n; i++) {
PyObject *subpath = PyList_GET_ITEM(dotted_path, i);
if (_PyUnicode_EqualToASCIIString(subpath, "<locals>")) {
if (obj == NULL)
PyErr_Format(PyExc_AttributeError,
"Can't pickle local object %R", name);
else
PyErr_Format(PyExc_AttributeError,
"Can't pickle local attribute %R on %R", name, obj);
Py_DECREF(dotted_path);
return NULL;
}
}
return dotted_path;
}
static PyObject *
get_deep_attribute(PyObject *obj, PyObject *names, PyObject **pparent)
{
Py_ssize_t i, n;
PyObject *parent = NULL;
assert(PyList_CheckExact(names));
Py_INCREF(obj);
n = PyList_GET_SIZE(names);
for (i = 0; i < n; i++) {
PyObject *name = PyList_GET_ITEM(names, i);
Py_XDECREF(parent);
parent = obj;
(void)_PyObject_LookupAttr(parent, name, &obj);
if (obj == NULL) {
Py_DECREF(parent);
return NULL;
}
}
if (pparent != NULL)
*pparent = parent;
else
Py_XDECREF(parent);
return obj;
}
static PyObject *
getattribute(PyObject *obj, PyObject *name, int allow_qualname)
{
PyObject *dotted_path, *attr;
if (allow_qualname) {
dotted_path = get_dotted_path(obj, name);
if (dotted_path == NULL)
return NULL;
attr = get_deep_attribute(obj, dotted_path, NULL);
Py_DECREF(dotted_path);
}
else {
(void)_PyObject_LookupAttr(obj, name, &attr);
}
if (attr == NULL && !PyErr_Occurred()) {
PyErr_Format(PyExc_AttributeError,
"Can't get attribute %R on %R", name, obj);
}
return attr;
}
static int
_checkmodule(PyObject *module_name, PyObject *module,
PyObject *global, PyObject *dotted_path)
{
if (module == Py_None) {
return -1;
}
if (PyUnicode_Check(module_name) &&
_PyUnicode_EqualToASCIIString(module_name, "__main__")) {
return -1;
}
PyObject *candidate = get_deep_attribute(module, dotted_path, NULL);
if (candidate == NULL) {
return -1;
}
if (candidate != global) {
Py_DECREF(candidate);
return -1;
}
Py_DECREF(candidate);
return 0;
}
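/* Find the name of the module that defines `global`: use its __module__
   attribute when it is present and not None, otherwise scan sys.modules for
   a module on which dotted_path resolves back to `global`. Falls back to
   "__main__" if nothing matches. */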
static PyObject *
whichmodule(PyObject *global, PyObject *dotted_path)
{
PyObject *module_name;
PyObject *module = NULL;
Py_ssize_t i;
PyObject *modules;
_Py_IDENTIFIER(__module__);
_Py_IDENTIFIER(modules);
_Py_IDENTIFIER(__main__);
if (_PyObject_LookupAttrId(global, &PyId___module__, &module_name) < 0) {
return NULL;
}
if (module_name) {
/* In some rare cases (e.g., bound methods of extension types),
__module__ can be None. If it is so, then search sys.modules for
the module of global. */
if (module_name != Py_None)
return module_name;
Py_CLEAR(module_name);
}
assert(module_name == NULL);
/* Fallback on walking sys.modules */
modules = _PySys_GetObjectId(&PyId_modules);
if (modules == NULL) {
PyErr_SetString(PyExc_RuntimeError, "unable to get sys.modules");
return NULL;
}
if (PyDict_CheckExact(modules)) {
i = 0;
while (PyDict_Next(modules, &i, &module_name, &module)) {
if (_checkmodule(module_name, module, global, dotted_path) == 0) {
Py_INCREF(module_name);
return module_name;
}
if (PyErr_Occurred()) {
return NULL;
}
}
}
else {
PyObject *iterator = PyObject_GetIter(modules);
if (iterator == NULL) {
return NULL;
}
while ((module_name = PyIter_Next(iterator))) {
module = PyObject_GetItem(modules, module_name);
if (module == NULL) {
Py_DECREF(module_name);
Py_DECREF(iterator);
return NULL;
}
if (_checkmodule(module_name, module, global, dotted_path) == 0) {
Py_DECREF(module);
Py_DECREF(iterator);
return module_name;
}
Py_DECREF(module);
Py_DECREF(module_name);
if (PyErr_Occurred()) {
Py_DECREF(iterator);
return NULL;
}
}
Py_DECREF(iterator);
}
/* If no module is found, use __main__. */
module_name = _PyUnicode_FromId(&PyId___main__);
Py_XINCREF(module_name);
return module_name;
}
/* fast_save_enter() and fast_save_leave() are guards against recursive
objects when Pickler is used with the "fast mode" (i.e., with object
memoization disabled). If the nesting of a list or dict object exceeds
FAST_NESTING_LIMIT, these guards will start keeping an internal
reference to the seen list or dict objects and check whether these objects
are recursive. These are not strictly necessary, since save() has a
hard-coded recursion limit, but they give a nicer error message than the
typical RuntimeError. */
static int
fast_save_enter(PicklerObject *self, PyObject *obj)
{
/* if fast_nesting < 0, we're doing an error exit. */
if (++self->fast_nesting >= FAST_NESTING_LIMIT) {
PyObject *key = NULL;
if (self->fast_memo == NULL) {
self->fast_memo = PyDict_New();
if (self->fast_memo == NULL) {
self->fast_nesting = -1;
return 0;
}
}
key = PyLong_FromVoidPtr(obj);
if (key == NULL) {
self->fast_nesting = -1;
return 0;
}
if (PyDict_GetItemWithError(self->fast_memo, key)) {
Py_DECREF(key);
PyErr_Format(PyExc_ValueError,
"fast mode: can't pickle cyclic objects "
"including object type %.200s at %p",
obj->ob_type->tp_name, obj);
self->fast_nesting = -1;
return 0;
}
if (PyErr_Occurred()) {
Py_DECREF(key);
self->fast_nesting = -1;
return 0;
}
if (PyDict_SetItem(self->fast_memo, key, Py_None) < 0) {
Py_DECREF(key);
self->fast_nesting = -1;
return 0;
}
Py_DECREF(key);
}
return 1;
}
static int
fast_save_leave(PicklerObject *self, PyObject *obj)
{
if (self->fast_nesting-- >= FAST_NESTING_LIMIT) {
PyObject *key = PyLong_FromVoidPtr(obj);
if (key == NULL)
return 0;
if (PyDict_DelItem(self->fast_memo, key) < 0) {
Py_DECREF(key);
return 0;
}
Py_DECREF(key);
}
return 1;
}
static int
save_none(PicklerObject *self, PyObject *obj)
{
const char none_op = NONE;
if (_Pickler_Write(self, &none_op, 1) < 0)
return -1;
return 0;
}
static int
save_bool(PicklerObject *self, PyObject *obj)
{
if (self->proto >= 2) {
const char bool_op = (obj == Py_True) ? NEWTRUE : NEWFALSE;
if (_Pickler_Write(self, &bool_op, 1) < 0)
return -1;
}
else {
/* These aren't opcodes -- they're ways to pickle bools before protocol 2
* so that unpicklers written before bools were introduced unpickle them
* as ints, but unpicklers after can recognize that bools were intended.
* Note that protocol 2 added direct ways to pickle bools.
*/
const char *bool_str = (obj == Py_True) ? "I01\n" : "I00\n";
if (_Pickler_Write(self, bool_str, strlen(bool_str)) < 0)
return -1;
}
return 0;
}
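/* Pickle an int. Values fitting in a signed 4-byte integer are written with
   BININT1/BININT2/BININT (or the text INT opcode when proto == 0); larger
   values are written as a little-endian 256's-complement byte string with
   LONG1/LONG4 (proto >= 2), or as their repr with the text LONG opcode. */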
static int
save_long(PicklerObject *self, PyObject *obj)
{
PyObject *repr = NULL;
Py_ssize_t size;
long val;
int overflow;
int status = 0;
val= PyLong_AsLongAndOverflow(obj, &overflow);
if (!overflow && (sizeof(long) <= 4 ||
(val <= 0x7fffffffL && val >= (-0x7fffffffL - 1))))
{
/* result fits in a signed 4-byte integer.
Note: we can't use -0x80000000L in the above condition because some
compilers (e.g., MSVC) will promote 0x80000000L to an unsigned type
before applying the unary minus when sizeof(long) <= 4. The
resulting value stays unsigned which is commonly not what we want,
so MSVC happily warns us about it. However, that result would have
been fine because we guard for sizeof(long) <= 4 which turns the
condition true in that particular case. */
char pdata[32];
Py_ssize_t len = 0;
if (self->bin) {
pdata[1] = (unsigned char)(val & 0xff);
pdata[2] = (unsigned char)((val >> 8) & 0xff);
pdata[3] = (unsigned char)((val >> 16) & 0xff);
pdata[4] = (unsigned char)((val >> 24) & 0xff);
if ((pdata[4] != 0) || (pdata[3] != 0)) {
pdata[0] = BININT;
len = 5;
}
else if (pdata[2] != 0) {
pdata[0] = BININT2;
len = 3;
}
else {
pdata[0] = BININT1;
len = 2;
}
}
else {
sprintf(pdata, "%c%ld\n", INT, val);
len = strlen(pdata);
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
assert(!PyErr_Occurred());
if (self->proto >= 2) {
/* Linear-time pickling. */
size_t nbits;
size_t nbytes;
unsigned char *pdata;
char header[5];
int i;
int sign = _PyLong_Sign(obj);
if (sign == 0) {
header[0] = LONG1;
header[1] = 0; /* It's 0 -- an empty bytestring. */
if (_Pickler_Write(self, header, 2) < 0)
goto error;
return 0;
}
nbits = _PyLong_NumBits(obj);
if (nbits == (size_t)-1 && PyErr_Occurred())
goto error;
/* How many bytes do we need? There are nbits >> 3 full
* bytes of data, and nbits & 7 leftover bits. If there
* are any leftover bits, then we clearly need another
* byte. What's not so obvious is that we *probably*
* need another byte even if there aren't any leftovers:
* the most-significant bit of the most-significant byte
* acts like a sign bit, and it's usually got a sense
* opposite of the one we need. The exception is ints
* of the form -(2**(8*j-1)) for j > 0. Such an int is
* its own 256's-complement, so has the right sign bit
* even without the extra byte. That's a pain to check
* for in advance, though, so we always grab an extra
* byte at the start, and cut it back later if possible.
*/
nbytes = (nbits >> 3) + 1;
if (nbytes > 0x7fffffffL) {
PyErr_SetString(PyExc_OverflowError,
"int too large to pickle");
goto error;
}
repr = PyBytes_FromStringAndSize(NULL, (Py_ssize_t)nbytes);
if (repr == NULL)
goto error;
pdata = (unsigned char *)PyBytes_AS_STRING(repr);
i = _PyLong_AsByteArray((PyLongObject *)obj,
pdata, nbytes,
1 /* little endian */ , 1 /* signed */ );
if (i < 0)
goto error;
/* If the int is negative, this may be a byte more than
* needed. This is so iff the MSB is all redundant sign
* bits.
*/
if (sign < 0 &&
nbytes > 1 &&
pdata[nbytes - 1] == 0xff &&
(pdata[nbytes - 2] & 0x80) != 0) {
nbytes--;
}
if (nbytes < 256) {
header[0] = LONG1;
header[1] = (unsigned char)nbytes;
size = 2;
}
else {
header[0] = LONG4;
size = (Py_ssize_t) nbytes;
for (i = 1; i < 5; i++) {
header[i] = (unsigned char)(size & 0xff);
size >>= 8;
}
size = 5;
}
if (_Pickler_Write(self, header, size) < 0 ||
_Pickler_Write(self, (char *)pdata, (int)nbytes) < 0)
goto error;
}
else {
const char long_op = LONG;
const char *string;
/* proto < 2: write the repr and newline. This is quadratic-time (in
the number of digits), in both directions. We add a trailing 'L'
to the repr, for compatibility with Python 2.x. */
repr = PyObject_Repr(obj);
if (repr == NULL)
goto error;
string = PyUnicode_AsUTF8AndSize(repr, &size);
if (string == NULL)
goto error;
if (_Pickler_Write(self, &long_op, 1) < 0 ||
_Pickler_Write(self, string, size) < 0 ||
_Pickler_Write(self, "L\n", 2) < 0)
goto error;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(repr);
return status;
}
static int
save_float(PicklerObject *self, PyObject *obj)
{
double x = PyFloat_AS_DOUBLE((PyFloatObject *)obj);
if (self->bin) {
char pdata[9];
pdata[0] = BINFLOAT;
if (_PyFloat_Pack8(x, (unsigned char *)&pdata[1], 0) < 0)
return -1;
if (_Pickler_Write(self, pdata, 9) < 0)
return -1;
}
else {
int result = -1;
char *buf = NULL;
char op = FLOAT;
if (_Pickler_Write(self, &op, 1) < 0)
goto done;
buf = PyOS_double_to_string(x, 'r', 0, Py_DTSF_ADD_DOT_0, NULL);
if (!buf) {
PyErr_NoMemory();
goto done;
}
if (_Pickler_Write(self, buf, strlen(buf)) < 0)
goto done;
if (_Pickler_Write(self, "\n", 1) < 0)
goto done;
result = 0;
done:
PyMem_Free(buf);
return result;
}
return 0;
}
/* Perform direct write of the header and payload of the binary object.
The large contiguous data is written directly into the underlying file
object, bypassing the output_buffer of the Pickler. We intentionally
do not insert a protocol 4 frame opcode to make it possible to optimize
file.read calls in the loader.
*/
static int
_Pickler_write_bytes(PicklerObject *self,
const char *header, Py_ssize_t header_size,
const char *data, Py_ssize_t data_size,
PyObject *payload)
{
int bypass_buffer = (data_size >= FRAME_SIZE_TARGET);
int framing = self->framing;
if (bypass_buffer) {
assert(self->output_buffer != NULL);
/* Commit the previous frame. */
if (_Pickler_CommitFrame(self)) {
return -1;
}
/* Disable framing temporarily */
self->framing = 0;
}
if (_Pickler_Write(self, header, header_size) < 0) {
return -1;
}
if (bypass_buffer && self->write != NULL) {
/* Bypass the in-memory buffer to directly stream large data
into the underlying file object. */
PyObject *result, *mem = NULL;
/* Dump the output buffer to the file. */
if (_Pickler_FlushToFile(self) < 0) {
return -1;
}
/* Stream write the payload into the file without going through the
output buffer. */
if (payload == NULL) {
/* TODO: It would be better to use a memoryview with a linked
original string if this is possible. */
payload = mem = PyBytes_FromStringAndSize(data, data_size);
if (payload == NULL) {
return -1;
}
}
result = PyObject_CallFunctionObjArgs(self->write, payload, NULL);
Py_XDECREF(mem);
if (result == NULL) {
return -1;
}
Py_DECREF(result);
/* Reinitialize the buffer for subsequent calls to _Pickler_Write. */
if (_Pickler_ClearBuffer(self) < 0) {
return -1;
}
}
else {
if (_Pickler_Write(self, data, data_size) < 0) {
return -1;
}
}
/* Re-enable framing for subsequent calls to _Pickler_Write. */
self->framing = framing;
return 0;
}
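/* Pickle a bytes object. Protocols before 3 have no bytes opcode, so the
   object is reduced to a codecs.encode(latin-1 str) call instead; protocol 3
   and later pick SHORT_BINBYTES, BINBYTES or BINBYTES8 depending on size. */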
static int
save_bytes(PicklerObject *self, PyObject *obj)
{
if (self->proto < 3) {
/* Older pickle protocols do not have an opcode for pickling bytes
objects. Therefore, we need to fake the copy protocol (i.e.,
the __reduce__ method) to permit bytes object unpickling.
Here we use a hack to be compatible with Python 2. Since in Python
2 'bytes' is just an alias for 'str' (which has different
parameters than the actual bytes object), we use codecs.encode
to create the appropriate 'str' object when unpickled using
Python 2 *and* the appropriate 'bytes' object when unpickled
using Python 3. Again this is a hack and we don't need to do this
with newer protocols. */
PyObject *reduce_value = NULL;
int status;
if (PyBytes_GET_SIZE(obj) == 0) {
reduce_value = Py_BuildValue("(O())", (PyObject*)&PyBytes_Type);
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyObject *unicode_str =
PyUnicode_DecodeLatin1(PyBytes_AS_STRING(obj),
PyBytes_GET_SIZE(obj),
"strict");
_Py_IDENTIFIER(latin1);
if (unicode_str == NULL)
return -1;
reduce_value = Py_BuildValue("(O(OO))",
st->codecs_encode, unicode_str,
_PyUnicode_FromId(&PyId_latin1));
Py_DECREF(unicode_str);
}
if (reduce_value == NULL)
return -1;
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
else {
Py_ssize_t size;
char header[9];
Py_ssize_t len;
size = PyBytes_GET_SIZE(obj);
if (size < 0)
return -1;
if (size <= 0xff) {
header[0] = SHORT_BINBYTES;
header[1] = (unsigned char)size;
len = 2;
}
else if ((size_t)size <= 0xffffffffUL) {
header[0] = BINBYTES;
header[1] = (unsigned char)(size & 0xff);
header[2] = (unsigned char)((size >> 8) & 0xff);
header[3] = (unsigned char)((size >> 16) & 0xff);
header[4] = (unsigned char)((size >> 24) & 0xff);
len = 5;
}
else if (self->proto >= 4) {
header[0] = BINBYTES8;
_write_size64(header + 1, size);
len = 9;
}
else {
PyErr_SetString(PyExc_OverflowError,
"cannot serialize a bytes object larger than 4 GiB");
return -1; /* string too large */
}
if (_Pickler_write_bytes(self, header, len,
PyBytes_AS_STRING(obj), size, obj) < 0)
{
return -1;
}
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
}
/* A copy of PyUnicode_EncodeRawUnicodeEscape() that also translates
backslash and newline characters to \uXXXX escapes. */
static PyObject *
raw_unicode_escape(PyObject *obj)
{
char *p;
Py_ssize_t i, size;
void *data;
unsigned int kind;
_PyBytesWriter writer;
if (PyUnicode_READY(obj))
return NULL;
_PyBytesWriter_Init(&writer);
size = PyUnicode_GET_LENGTH(obj);
data = PyUnicode_DATA(obj);
kind = PyUnicode_KIND(obj);
p = _PyBytesWriter_Alloc(&writer, size);
if (p == NULL)
goto error;
writer.overallocate = 1;
for (i=0; i < size; i++) {
Py_UCS4 ch = PyUnicode_READ(kind, data, i);
/* Map 32-bit characters to '\Uxxxxxxxx' */
if (ch >= 0x10000) {
/* -1: subtract 1 preallocated byte */
p = _PyBytesWriter_Prepare(&writer, p, 10-1);
if (p == NULL)
goto error;
*p++ = '\\';
*p++ = 'U';
*p++ = Py_hexdigits[(ch >> 28) & 0xf];
*p++ = Py_hexdigits[(ch >> 24) & 0xf];
*p++ = Py_hexdigits[(ch >> 20) & 0xf];
*p++ = Py_hexdigits[(ch >> 16) & 0xf];
*p++ = Py_hexdigits[(ch >> 12) & 0xf];
*p++ = Py_hexdigits[(ch >> 8) & 0xf];
*p++ = Py_hexdigits[(ch >> 4) & 0xf];
*p++ = Py_hexdigits[ch & 15];
}
/* Map 16-bit characters, '\\' and '\n' to '\uxxxx' */
else if (ch >= 256 || ch == '\\' || ch == '\n') {
/* -1: subtract 1 preallocated byte */
p = _PyBytesWriter_Prepare(&writer, p, 6-1);
if (p == NULL)
goto error;
*p++ = '\\';
*p++ = 'u';
*p++ = Py_hexdigits[(ch >> 12) & 0xf];
*p++ = Py_hexdigits[(ch >> 8) & 0xf];
*p++ = Py_hexdigits[(ch >> 4) & 0xf];
*p++ = Py_hexdigits[ch & 15];
}
/* Copy everything else as-is */
else
*p++ = (char) ch;
}
return _PyBytesWriter_Finish(&writer, p);
error:
_PyBytesWriter_Dealloc(&writer);
return NULL;
}
static int
write_unicode_binary(PicklerObject *self, PyObject *obj)
{
char header[9];
Py_ssize_t len;
PyObject *encoded = NULL;
Py_ssize_t size;
const char *data;
if (PyUnicode_READY(obj))
return -1;
data = PyUnicode_AsUTF8AndSize(obj, &size);
if (data == NULL) {
/* Issue #8383: for strings with lone surrogates, fall back on the
"surrogatepass" error handler. */
PyErr_Clear();
encoded = PyUnicode_AsEncodedString(obj, "utf-8", "surrogatepass");
if (encoded == NULL)
return -1;
data = PyBytes_AS_STRING(encoded);
size = PyBytes_GET_SIZE(encoded);
}
assert(size >= 0);
if (size <= 0xff && self->proto >= 4) {
header[0] = SHORT_BINUNICODE;
header[1] = (unsigned char)(size & 0xff);
len = 2;
}
else if ((size_t)size <= 0xffffffffUL) {
header[0] = BINUNICODE;
header[1] = (unsigned char)(size & 0xff);
header[2] = (unsigned char)((size >> 8) & 0xff);
header[3] = (unsigned char)((size >> 16) & 0xff);
header[4] = (unsigned char)((size >> 24) & 0xff);
len = 5;
}
else if (self->proto >= 4) {
header[0] = BINUNICODE8;
_write_size64(header + 1, size);
len = 9;
}
else {
PyErr_SetString(PyExc_OverflowError,
"cannot serialize a string larger than 4GiB");
Py_XDECREF(encoded);
return -1;
}
if (_Pickler_write_bytes(self, header, len, data, size, encoded) < 0) {
Py_XDECREF(encoded);
return -1;
}
Py_XDECREF(encoded);
return 0;
}
static int
save_unicode(PicklerObject *self, PyObject *obj)
{
if (self->bin) {
if (write_unicode_binary(self, obj) < 0)
return -1;
}
else {
PyObject *encoded;
Py_ssize_t size;
const char unicode_op = UNICODE;
encoded = raw_unicode_escape(obj);
if (encoded == NULL)
return -1;
if (_Pickler_Write(self, &unicode_op, 1) < 0) {
Py_DECREF(encoded);
return -1;
}
size = PyBytes_GET_SIZE(encoded);
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded), size) < 0) {
Py_DECREF(encoded);
return -1;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
return -1;
}
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
/* A helper for save_tuple. Push the len elements in tuple t on the stack. */
static int
store_tuple_elements(PicklerObject *self, PyObject *t, Py_ssize_t len)
{
Py_ssize_t i;
assert(PyTuple_Size(t) == len);
for (i = 0; i < len; i++) {
PyObject *element = PyTuple_GET_ITEM(t, i);
if (element == NULL)
return -1;
if (save(self, element, 0) < 0)
return -1;
}
return 0;
}
/* Tuples are ubiquitous in the pickle protocols, so many techniques are
* used across protocols to minimize the space needed to pickle them.
* Tuples are also the only builtin immutable type that can be recursive
* (a tuple can be reached from itself), and that requires some subtle
* magic so that it works in all cases. IOW, this is a long routine.
*/
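/* Roughly, the streams produced below are:
()                     EMPTY_TUPLE (or MARK TUPLE for protocol 0)
len <= 3, proto >= 2   e1 ... en TUPLE1/TUPLE2/TUPLE3
otherwise              MARK e1 ... en TUPLE
If the tuple turns out to be recursive, the elements just pushed are
popped again (POP, or POP_MARK in binary mode) and the memoized copy is
fetched instead with GET/BINGET. */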
static int
save_tuple(PicklerObject *self, PyObject *obj)
{
Py_ssize_t len, i;
const char mark_op = MARK;
const char tuple_op = TUPLE;
const char pop_op = POP;
const char pop_mark_op = POP_MARK;
const char len2opcode[] = {EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3};
if ((len = PyTuple_Size(obj)) < 0)
return -1;
if (len == 0) {
char pdata[2];
if (self->proto) {
pdata[0] = EMPTY_TUPLE;
len = 1;
}
else {
pdata[0] = MARK;
pdata[1] = TUPLE;
len = 2;
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
/* The tuple isn't in the memo now. If it shows up there after
* saving the tuple elements, the tuple must be recursive, in
* which case we'll pop everything we put on the stack, and fetch
* its value from the memo.
*/
if (len <= 3 && self->proto >= 2) {
/* Use TUPLE{1,2,3} opcodes. */
if (store_tuple_elements(self, obj, len) < 0)
return -1;
if (PyMemoTable_Get(self->memo, obj)) {
/* pop the len elements */
for (i = 0; i < len; i++)
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
/* fetch from memo */
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else { /* Not recursive. */
if (_Pickler_Write(self, len2opcode + len, 1) < 0)
return -1;
}
goto memoize;
}
/* proto < 2 and len > 0, or proto >= 2 and len > 3.
* Generate MARK e1 e2 ... TUPLE
*/
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
if (store_tuple_elements(self, obj, len) < 0)
return -1;
if (PyMemoTable_Get(self->memo, obj)) {
/* pop the stack stuff we pushed */
if (self->bin) {
if (_Pickler_Write(self, &pop_mark_op, 1) < 0)
return -1;
}
else {
/* Note that we pop one more than len, to remove
* the MARK too.
*/
for (i = 0; i <= len; i++)
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
}
/* fetch from memo */
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else { /* Not recursive. */
if (_Pickler_Write(self, &tuple_op, 1) < 0)
return -1;
}
memoize:
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
/* iter is an iterator giving items, and we batch up chunks of
* MARK item item ... item APPENDS
* opcode sequences. Calling code should have arranged to first create an
* empty list, or list-like object, for the APPENDS to operate on.
* Returns 0 on success, <0 on error.
*/
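/* For example, with BATCHSIZE == 1000 the items of a 2500-element list
come out as
MARK i1 ... i1000 APPENDS  MARK i1001 ... i2000 APPENDS
MARK i2001 ... i2500 APPENDS
while a single-item iterator produces just the item followed by a bare
APPEND. */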
static int
batch_list(PicklerObject *self, PyObject *iter)
{
PyObject *obj = NULL;
PyObject *firstitem = NULL;
int i, n;
const char mark_op = MARK;
const char append_op = APPEND;
const char appends_op = APPENDS;
assert(iter != NULL);
/* XXX: I think this function could be made faster by avoiding the
iterator interface and fetching objects directly from list using
PyList_GET_ITEM.
*/
if (self->proto == 0) {
/* APPENDS isn't available; do one at a time. */
for (;;) {
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
return -1;
break;
}
i = save(self, obj, 0);
Py_DECREF(obj);
if (i < 0)
return -1;
if (_Pickler_Write(self, &append_op, 1) < 0)
return -1;
}
return 0;
}
/* proto > 0: write in batches of BATCHSIZE. */
do {
/* Get first item */
firstitem = PyIter_Next(iter);
if (firstitem == NULL) {
if (PyErr_Occurred())
goto error;
/* nothing more to add */
break;
}
/* Try to get a second item */
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
/* Only one item to write */
if (save(self, firstitem, 0) < 0)
goto error;
if (_Pickler_Write(self, &append_op, 1) < 0)
goto error;
Py_CLEAR(firstitem);
break;
}
/* More than one item to write */
/* Pump out MARK, items, APPENDS. */
if (_Pickler_Write(self, &mark_op, 1) < 0)
goto error;
if (save(self, firstitem, 0) < 0)
goto error;
Py_CLEAR(firstitem);
n = 1;
/* Fetch and save up to BATCHSIZE items */
while (obj) {
if (save(self, obj, 0) < 0)
goto error;
Py_CLEAR(obj);
n += 1;
if (n == BATCHSIZE)
break;
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
break;
}
}
if (_Pickler_Write(self, &appends_op, 1) < 0)
goto error;
} while (n == BATCHSIZE);
return 0;
error:
Py_XDECREF(firstitem);
Py_XDECREF(obj);
return -1;
}
/* This is a variant of batch_list() above, specialized for lists (with no
* support for list subclasses). Like batch_list(), we batch up chunks of
* MARK item item ... item APPENDS
* opcode sequences. Calling code should have arranged to first create an
* empty list, or list-like object, for the APPENDS to operate on.
* Returns 0 on success, -1 on error.
*
* This version is considerably faster than batch_list(), if less general.
*
* Note that this only works for protocols > 0.
*/
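/* Note the single-element special case below: it emits a bare APPEND and
skips the MARK/APPENDS framing, saving two bytes. */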
static int
batch_list_exact(PicklerObject *self, PyObject *obj)
{
PyObject *item = NULL;
Py_ssize_t this_batch, total;
const char append_op = APPEND;
const char appends_op = APPENDS;
const char mark_op = MARK;
assert(obj != NULL);
assert(self->proto > 0);
assert(PyList_CheckExact(obj));
if (PyList_GET_SIZE(obj) == 1) {
item = PyList_GET_ITEM(obj, 0);
if (save(self, item, 0) < 0)
return -1;
if (_Pickler_Write(self, &append_op, 1) < 0)
return -1;
return 0;
}
/* Write in batches of BATCHSIZE. */
total = 0;
do {
this_batch = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (total < PyList_GET_SIZE(obj)) {
item = PyList_GET_ITEM(obj, total);
if (save(self, item, 0) < 0)
return -1;
total++;
if (++this_batch == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &appends_op, 1) < 0)
return -1;
} while (total < PyList_GET_SIZE(obj));
return 0;
}
static int
save_list(PicklerObject *self, PyObject *obj)
{
char header[3];
Py_ssize_t len;
int status = 0;
if (self->fast && !fast_save_enter(self, obj))
goto error;
/* Create an empty list. */
if (self->bin) {
header[0] = EMPTY_LIST;
len = 1;
}
else {
header[0] = MARK;
header[1] = LIST;
len = 2;
}
if (_Pickler_Write(self, header, len) < 0)
goto error;
/* Get list length, and bow out early if empty. */
if ((len = PyList_Size(obj)) < 0)
goto error;
if (memo_put(self, obj) < 0)
goto error;
if (len != 0) {
/* Materialize the list elements. */
if (PyList_CheckExact(obj) && self->proto > 0) {
if (Py_EnterRecursiveCall(" while pickling an object"))
goto error;
status = batch_list_exact(self, obj);
Py_LeaveRecursiveCall();
} else {
PyObject *iter = PyObject_GetIter(obj);
if (iter == NULL)
goto error;
if (Py_EnterRecursiveCall(" while pickling an object")) {
Py_DECREF(iter);
goto error;
}
status = batch_list(self, iter);
Py_LeaveRecursiveCall();
Py_DECREF(iter);
}
}
if (0) {
error:
status = -1;
}
if (self->fast && !fast_save_leave(self, obj))
status = -1;
return status;
}
/* iter is an iterator giving (key, value) pairs, and we batch up chunks of
* MARK key value ... key value SETITEMS
* opcode sequences. Calling code should have arranged to first create an
* empty dict, or dict-like object, for the SETITEMS to operate on.
* Returns 0 on success, <0 on error.
*
* This is very much like batch_list(). The difference between saving
* elements directly, and picking apart two-tuples, is so long-winded at
* the C level, though, that attempts to combine these routines were too
* ugly to bear.
*/
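/* For example, an items iterator yielding (1, 'a') and (2, 'b') produces
MARK 1 'a' 2 'b' SETITEMS
whereas an iterator yielding a single pair produces key value SETITEM
with no MARK framing. */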
static int
batch_dict(PicklerObject *self, PyObject *iter)
{
PyObject *obj = NULL;
PyObject *firstitem = NULL;
int i, n;
const char mark_op = MARK;
const char setitem_op = SETITEM;
const char setitems_op = SETITEMS;
assert(iter != NULL);
if (self->proto == 0) {
/* SETITEMS isn't available; do one at a time. */
for (;;) {
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
return -1;
break;
}
if (!PyTuple_Check(obj) || PyTuple_Size(obj) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
Py_DECREF(obj);  /* drop the reference obtained from PyIter_Next() */
return -1;
}
i = save(self, PyTuple_GET_ITEM(obj, 0), 0);
if (i >= 0)
i = save(self, PyTuple_GET_ITEM(obj, 1), 0);
Py_DECREF(obj);
if (i < 0)
return -1;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
return -1;
}
return 0;
}
/* proto > 0: write in batches of BATCHSIZE. */
do {
/* Get first item */
firstitem = PyIter_Next(iter);
if (firstitem == NULL) {
if (PyErr_Occurred())
goto error;
/* nothing more to add */
break;
}
if (!PyTuple_Check(firstitem) || PyTuple_Size(firstitem) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
goto error;
}
/* Try to get a second item */
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
/* Only one item to write */
if (save(self, PyTuple_GET_ITEM(firstitem, 0), 0) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 1), 0) < 0)
goto error;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
goto error;
Py_CLEAR(firstitem);
break;
}
/* More than one item to write */
/* Pump out MARK, items, SETITEMS. */
if (_Pickler_Write(self, &mark_op, 1) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 0), 0) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 1), 0) < 0)
goto error;
Py_CLEAR(firstitem);
n = 1;
/* Fetch and save up to BATCHSIZE items */
while (obj) {
if (!PyTuple_Check(obj) || PyTuple_Size(obj) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
goto error;
}
if (save(self, PyTuple_GET_ITEM(obj, 0), 0) < 0 ||
save(self, PyTuple_GET_ITEM(obj, 1), 0) < 0)
goto error;
Py_CLEAR(obj);
n += 1;
if (n == BATCHSIZE)
break;
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
break;
}
}
if (_Pickler_Write(self, &setitems_op, 1) < 0)
goto error;
} while (n == BATCHSIZE);
return 0;
error:
Py_XDECREF(firstitem);
Py_XDECREF(obj);
return -1;
}
/* This is a variant of batch_dict() above that specializes for dicts, with no
* support for dict subclasses. Like batch_dict(), we batch up chunks of
* MARK key value ... key value SETITEMS
* opcode sequences. Calling code should have arranged to first create an
* empty dict, or dict-like object, for the SETITEMS to operate on.
* Returns 0 on success, -1 on error.
*
* Note that this currently doesn't work for protocol 0.
*/
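/* Saving a key or value below can run arbitrary Python code (e.g. a
__reduce__ method) that mutates the dict while PyDict_Next() is walking
it; the size check after each batch detects this and raises RuntimeError
rather than continuing with a possibly stale iteration position. */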
static int
batch_dict_exact(PicklerObject *self, PyObject *obj)
{
PyObject *key = NULL, *value = NULL;
int i;
Py_ssize_t dict_size, ppos = 0;
const char mark_op = MARK;
const char setitem_op = SETITEM;
const char setitems_op = SETITEMS;
assert(obj != NULL && PyDict_CheckExact(obj));
assert(self->proto > 0);
dict_size = PyDict_GET_SIZE(obj);
/* Special-case len(d) == 1 to save space. */
if (dict_size == 1) {
PyDict_Next(obj, &ppos, &key, &value);
if (save(self, key, 0) < 0)
return -1;
if (save(self, value, 0) < 0)
return -1;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
return -1;
return 0;
}
/* Write in batches of BATCHSIZE. */
do {
i = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (PyDict_Next(obj, &ppos, &key, &value)) {
if (save(self, key, 0) < 0)
return -1;
if (save(self, value, 0) < 0)
return -1;
if (++i == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &setitems_op, 1) < 0)
return -1;
if (PyDict_GET_SIZE(obj) != dict_size) {
PyErr_Format(
PyExc_RuntimeError,
"dictionary changed size during iteration");
return -1;
}
} while (i == BATCHSIZE);
return 0;
}
static int
save_dict(PicklerObject *self, PyObject *obj)
{
PyObject *items, *iter;
char header[3];
Py_ssize_t len;
int status = 0;
assert(PyDict_Check(obj));
if (self->fast && !fast_save_enter(self, obj))
goto error;
/* Create an empty dict. */
if (self->bin) {
header[0] = EMPTY_DICT;
len = 1;
}
else {
header[0] = MARK;
header[1] = DICT;
len = 2;
}
if (_Pickler_Write(self, header, len) < 0)
goto error;
if (memo_put(self, obj) < 0)
goto error;
if (PyDict_GET_SIZE(obj)) {
/* Save the dict items. */
if (PyDict_CheckExact(obj) && self->proto > 0) {
/* We can take certain shortcuts if we know this is a dict and
not a dict subclass. */
if (Py_EnterRecursiveCall(" while pickling an object"))
goto error;
status = batch_dict_exact(self, obj);
Py_LeaveRecursiveCall();
} else {
_Py_IDENTIFIER(items);
items = _PyObject_CallMethodId(obj, &PyId_items, NULL);
if (items == NULL)
goto error;
iter = PyObject_GetIter(items);
Py_DECREF(items);
if (iter == NULL)
goto error;
if (Py_EnterRecursiveCall(" while pickling an object")) {
Py_DECREF(iter);
goto error;
}
status = batch_dict(self, iter);
Py_LeaveRecursiveCall();
Py_DECREF(iter);
}
}
if (0) {
error:
status = -1;
}
if (self->fast && !fast_save_leave(self, obj))
status = -1;
return status;
}
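/* Pickle a set. For protocol >= 4 this emits EMPTY_SET followed by batches
of MARK item ... item ADDITEMS; older protocols fall back to reducing the
object to set(list(obj)) via save_reduce(). */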
static int
save_set(PicklerObject *self, PyObject *obj)
{
PyObject *item;
int i;
Py_ssize_t set_size, ppos = 0;
Py_hash_t hash;
const char empty_set_op = EMPTY_SET;
const char mark_op = MARK;
const char additems_op = ADDITEMS;
if (self->proto < 4) {
PyObject *items;
PyObject *reduce_value;
int status;
items = PySequence_List(obj);
if (items == NULL) {
return -1;
}
reduce_value = Py_BuildValue("(O(O))", (PyObject*)&PySet_Type, items);
Py_DECREF(items);
if (reduce_value == NULL) {
return -1;
}
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
if (_Pickler_Write(self, &empty_set_op, 1) < 0)
return -1;
if (memo_put(self, obj) < 0)
return -1;
set_size = PySet_GET_SIZE(obj);
if (set_size == 0)
return 0; /* nothing to do */
/* Write in batches of BATCHSIZE. */
do {
i = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (_PySet_NextEntry(obj, &ppos, &item, &hash)) {
if (save(self, item, 0) < 0)
return -1;
if (++i == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &additems_op, 1) < 0)
return -1;
if (PySet_GET_SIZE(obj) != set_size) {
PyErr_Format(
PyExc_RuntimeError,
"set changed size during iteration");
return -1;
}
} while (i == BATCHSIZE);
return 0;
}
static int
save_frozenset(PicklerObject *self, PyObject *obj)
{
PyObject *iter;
const char mark_op = MARK;
const char frozenset_op = FROZENSET;
if (self->fast && !fast_save_enter(self, obj))
return -1;
if (self->proto < 4) {
PyObject *items;
PyObject *reduce_value;
int status;
items = PySequence_List(obj);
if (items == NULL) {
return -1;
}
reduce_value = Py_BuildValue("(O(O))", (PyObject*)&PyFrozenSet_Type,
items);
Py_DECREF(items);
if (reduce_value == NULL) {
return -1;
}
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
iter = PyObject_GetIter(obj);
if (iter == NULL) {
return -1;
}
for (;;) {
PyObject *item;
item = PyIter_Next(iter);
if (item == NULL) {
if (PyErr_Occurred()) {
Py_DECREF(iter);
return -1;
}
break;
}
if (save(self, item, 0) < 0) {
Py_DECREF(item);
Py_DECREF(iter);
return -1;
}
Py_DECREF(item);
}
Py_DECREF(iter);
/* If the object is already in the memo, this means it is
recursive. In this case, throw away everything we put on the
stack, and fetch the object back from the memo. */
if (PyMemoTable_Get(self->memo, obj)) {
const char pop_mark_op = POP_MARK;
if (_Pickler_Write(self, &pop_mark_op, 1) < 0)
return -1;
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
if (_Pickler_Write(self, &frozenset_op, 1) < 0)
return -1;
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
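/* Translate a (module_name, global_name) pair from its Python 3 spelling
to the Python 2 spelling recorded in _compat_pickle, first through the
per-name REVERSE_NAME_MAPPING and then through REVERSE_IMPORT_MAPPING for
plain module renames. Both names are replaced through the output pointers
on success. */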
static int
fix_imports(PyObject **module_name, PyObject **global_name)
{
PyObject *key;
PyObject *item;
PickleState *st = _Pickle_GetGlobalState();
key = PyTuple_Pack(2, *module_name, *global_name);
if (key == NULL)
return -1;
item = PyDict_GetItemWithError(st->name_mapping_3to2, key);
Py_DECREF(key);
if (item) {
PyObject *fixed_module_name;
PyObject *fixed_global_name;
if (!PyTuple_Check(item) || PyTuple_GET_SIZE(item) != 2) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING values "
"should be 2-tuples, not %.200s",
Py_TYPE(item)->tp_name);
return -1;
}
fixed_module_name = PyTuple_GET_ITEM(item, 0);
fixed_global_name = PyTuple_GET_ITEM(item, 1);
if (!PyUnicode_Check(fixed_module_name) ||
!PyUnicode_Check(fixed_global_name)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING values "
"should be pairs of str, not (%.200s, %.200s)",
Py_TYPE(fixed_module_name)->tp_name,
Py_TYPE(fixed_global_name)->tp_name);
return -1;
}
Py_CLEAR(*module_name);
Py_CLEAR(*global_name);
Py_INCREF(fixed_module_name);
Py_INCREF(fixed_global_name);
*module_name = fixed_module_name;
*global_name = fixed_global_name;
return 0;
}
else if (PyErr_Occurred()) {
return -1;
}
item = PyDict_GetItemWithError(st->import_mapping_3to2, *module_name);
if (item) {
if (!PyUnicode_Check(item)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_IMPORT_MAPPING values "
"should be strings, not %.200s",
Py_TYPE(item)->tp_name);
return -1;
}
Py_INCREF(item);
Py_XSETREF(*module_name, item);
}
else if (PyErr_Occurred()) {
return -1;
}
return 0;
}
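/* Pickle a class, function or other module-global object by reference.
Depending on the protocol this emits an EXT{1,2,4} opcode (if the object
is listed in copyreg's extension registry), a STACK_GLOBAL sequence for
protocol 4+, a getattr-based reduce for nested qualnames on older
protocols, or the classic text GLOBAL opcode followed by
"module\nname\n". */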
static int
save_global(PicklerObject *self, PyObject *obj, PyObject *name)
{
PyObject *global_name = NULL;
PyObject *module_name = NULL;
PyObject *module = NULL;
PyObject *parent = NULL;
PyObject *dotted_path = NULL;
PyObject *lastname = NULL;
PyObject *cls;
PickleState *st = _Pickle_GetGlobalState();
int status = 0;
_Py_IDENTIFIER(__name__);
_Py_IDENTIFIER(__qualname__);
const char global_op = GLOBAL;
if (name) {
Py_INCREF(name);
global_name = name;
}
else {
if (_PyObject_LookupAttrId(obj, &PyId___qualname__, &global_name) < 0)
goto error;
if (global_name == NULL) {
global_name = _PyObject_GetAttrId(obj, &PyId___name__);
if (global_name == NULL)
goto error;
}
}
dotted_path = get_dotted_path(module, global_name);
if (dotted_path == NULL)
goto error;
module_name = whichmodule(obj, dotted_path);
if (module_name == NULL)
goto error;
/* XXX: Change to use the import C API directly with level=0 to disallow
relative imports.
XXX: PyImport_ImportModuleLevel could be used. However, this bypasses
builtins.__import__. Therefore, _pickle, unlike pickle.py, will ignore
custom import functions (IMHO, this would be a nice security
feature). The import C API would need to be extended to support the
extra parameters of __import__ to fix that. */
module = PyImport_Import(module_name);
if (module == NULL) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: import of module %R failed",
obj, module_name);
goto error;
}
lastname = PyList_GET_ITEM(dotted_path, PyList_GET_SIZE(dotted_path)-1);
Py_INCREF(lastname);
cls = get_deep_attribute(module, dotted_path, &parent);
Py_CLEAR(dotted_path);
if (cls == NULL) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: attribute lookup %S on %S failed",
obj, global_name, module_name);
goto error;
}
if (cls != obj) {
Py_DECREF(cls);
PyErr_Format(st->PicklingError,
"Can't pickle %R: it's not the same object as %S.%S",
obj, module_name, global_name);
goto error;
}
Py_DECREF(cls);
if (self->proto >= 2) {
/* See whether this is in the extension registry, and if
* so generate an EXT opcode.
*/
PyObject *extension_key;
PyObject *code_obj; /* extension code as Python object */
long code; /* extension code as C value */
char pdata[5];
Py_ssize_t n;
extension_key = PyTuple_Pack(2, module_name, global_name);
if (extension_key == NULL) {
goto error;
}
code_obj = PyDict_GetItemWithError(st->extension_registry,
extension_key);
Py_DECREF(extension_key);
/* The object is not registered in the extension registry.
This is the most likely code path. */
if (code_obj == NULL) {
if (PyErr_Occurred()) {
goto error;
}
goto gen_global;
}
/* XXX: pickle.py checks neither the type nor the range of the value
returned by the extension_registry. It should, for consistency. */
/* Verify code_obj has the right type and value. */
if (!PyLong_Check(code_obj)) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: extension code %R isn't an integer",
obj, code_obj);
goto error;
}
code = PyLong_AS_LONG(code_obj);
if (code <= 0 || code > 0x7fffffffL) {
if (!PyErr_Occurred())
PyErr_Format(st->PicklingError, "Can't pickle %R: extension "
"code %ld is out of range", obj, code);
goto error;
}
/* Generate an EXT opcode. */
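/* For example, code 1 is written as EXT1 0x01, and code 0x1234 as
EXT2 0x34 0x12 (the multi-byte forms are little-endian). */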
if (code <= 0xff) {
pdata[0] = EXT1;
pdata[1] = (unsigned char)code;
n = 2;
}
else if (code <= 0xffff) {
pdata[0] = EXT2;
pdata[1] = (unsigned char)(code & 0xff);
pdata[2] = (unsigned char)((code >> 8) & 0xff);
n = 3;
}
else {
pdata[0] = EXT4;
pdata[1] = (unsigned char)(code & 0xff);
pdata[2] = (unsigned char)((code >> 8) & 0xff);
pdata[3] = (unsigned char)((code >> 16) & 0xff);
pdata[4] = (unsigned char)((code >> 24) & 0xff);
n = 5;
}
if (_Pickler_Write(self, pdata, n) < 0)
goto error;
}
else {
gen_global:
if (parent == module) {
Py_INCREF(lastname);
Py_DECREF(global_name);
global_name = lastname;
}
if (self->proto >= 4) {
const char stack_global_op = STACK_GLOBAL;
if (save(self, module_name, 0) < 0)
goto error;
if (save(self, global_name, 0) < 0)
goto error;
if (_Pickler_Write(self, &stack_global_op, 1) < 0)
goto error;
}
else if (parent != module) {
PyObject *reduce_value = Py_BuildValue("(O(OO))",
st->getattr, parent, lastname);
if (reduce_value == NULL)
goto error;
status = save_reduce(self, reduce_value, NULL);
Py_DECREF(reduce_value);
if (status < 0)
goto error;
}
else {
/* Generate a classic text-mode GLOBAL opcode. We get here when the
protocol is below 4 and the object either is not registered in the
extension registry or the protocol is too old for EXT opcodes. */
PyObject *encoded;
PyObject *(*unicode_encoder)(PyObject *);
if (_Pickler_Write(self, &global_op, 1) < 0)
goto error;
/* For protocol < 3, unless the caller disabled it (fix_imports=False),
convert module and global names back to their Python 2.x spellings. */
if (self->proto < 3 && self->fix_imports) {
if (fix_imports(&module_name, &global_name) < 0) {
goto error;
}
}
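/* For instance, fix_imports() maps the 3.x module name "builtins" back
to its 2.x name "__builtin__" so a Python 2 unpickler can resolve it. */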
/* Since Python 3.0 supports non-ASCII identifiers, encode the module
and global names as UTF-8 when pickling with protocol 3 (protocol 4
and higher never reach this branch; they use STACK_GLOBAL above).
For protocols 0-2 use ASCII so that the pickle remains readable by
Unpicklers running on Python 2.x. */
if (self->proto == 3) {
unicode_encoder = PyUnicode_AsUTF8String;
}
else {
unicode_encoder = PyUnicode_AsASCIIString;
}
encoded = unicode_encoder(module_name);
if (encoded == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError))
PyErr_Format(st->PicklingError,
"can't pickle module identifier '%S' using "
"pickle protocol %i",
module_name, self->proto);
goto error;
}
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded),
PyBytes_GET_SIZE(encoded)) < 0) {
Py_DECREF(encoded);
goto error;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
goto error;
/* Save the name of the global. */
encoded = unicode_encoder(global_name);
if (encoded == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError))
PyErr_Format(st->PicklingError,
"can't pickle global identifier '%S' using "
"pickle protocol %i",
global_name, self->proto);
goto error;
}
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded),
PyBytes_GET_SIZE(encoded)) < 0) {
Py_DECREF(encoded);
goto error;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
goto error;
}
/* Memoize the object. */
if (memo_put(self, obj) < 0)
goto error;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(module_name);
Py_XDECREF(global_name);
Py_XDECREF(module);
Py_XDECREF(parent);
Py_XDECREF(dotted_path);
Py_XDECREF(lastname);
return status;
}
static int
save_singleton_type(PicklerObject *self, PyObject *obj, PyObject *singleton)
{
PyObject *reduce_value;
int status;
reduce_value = Py_BuildValue("O(O)", &PyType_Type, singleton);
if (reduce_value == NULL) {
return -1;
}
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
static int
save_type(PicklerObject *self, PyObject *obj)
{
if (obj == (PyObject *)&_PyNone_Type) {
return save_singleton_type(self, obj, Py_None);
}
else if (obj == (PyObject *)&PyEllipsis_Type) {
return save_singleton_type(self, obj, Py_Ellipsis);
}
else if (obj == (PyObject *)&_PyNotImplemented_Type) {
return save_singleton_type(self, obj, Py_NotImplemented);
}
return save_global(self, obj, NULL);
}
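/* Try to pickle obj via the persistent-id hook. Returns 1 if the hook
handled the object (writing BINPERSID in binary mode, or a text PERSID
line otherwise), 0 if the hook returned None, and -1 on error. */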
static int
save_pers(PicklerObject *self, PyObject *obj)
{
PyObject *pid = NULL;
int status = 0;
const char persid_op = PERSID;
const char binpersid_op = BINPERSID;
pid = call_method(self->pers_func, self->pers_func_self, obj);
if (pid == NULL)
return -1;
if (pid != Py_None) {
if (self->bin) {
if (save(self, pid, 1) < 0 ||
_Pickler_Write(self, &binpersid_op, 1) < 0)
goto error;
}
else {
PyObject *pid_str;
pid_str = PyObject_Str(pid);
if (pid_str == NULL)
goto error;
/* XXX: Should it check whether the pid contains embedded
newlines? */
if (!PyUnicode_IS_ASCII(pid_str)) {
PyErr_SetString(_Pickle_GetGlobalState()->PicklingError,
"persistent IDs in protocol 0 must be "
"ASCII strings");
Py_DECREF(pid_str);
goto error;
}
if (_Pickler_Write(self, &persid_op, 1) < 0 ||
_Pickler_Write(self, PyUnicode_DATA(pid_str),
PyUnicode_GET_LENGTH(pid_str)) < 0 ||
_Pickler_Write(self, "\n", 1) < 0) {
Py_DECREF(pid_str);
goto error;
}
Py_DECREF(pid_str);
}
status = 1;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(pid);
return status;
}
static PyObject *
get_class(PyObject *obj)
{
PyObject *cls;
_Py_IDENTIFIER(__class__);
if (_PyObject_LookupAttrId(obj, &PyId___class__, &cls) == 0) {
cls = (PyObject *) Py_TYPE(obj);
Py_INCREF(cls);
}
return cls;
}
/* We're saving obj, and args is the 2-thru-5 tuple returned by the
* appropriate __reduce__ method for obj.
*/
static int
save_reduce(PicklerObject *self, PyObject *args, PyObject *obj)
{
PyObject *callable;
PyObject *argtup;
PyObject *state = NULL;
PyObject *listitems = Py_None;
PyObject *dictitems = Py_None;
PickleState *st = _Pickle_GetGlobalState();
Py_ssize_t size;
int use_newobj = 0, use_newobj_ex = 0;
const char reduce_op = REDUCE;
const char build_op = BUILD;
const char newobj_op = NEWOBJ;
const char newobj_ex_op = NEWOBJ_EX;
size = PyTuple_Size(args);
if (size < 2 || size > 5) {
PyErr_SetString(st->PicklingError,