3 changes: 2 additions & 1 deletion Include/internal/pycore_lock.h
@@ -160,8 +160,9 @@ typedef struct {

PyAPI_FUNC(int) _PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m);
PyAPI_FUNC(void) _PyRecursiveMutex_Lock(_PyRecursiveMutex *m);
extern PyLockStatus _PyRecursiveMutex_LockTimed(_PyRecursiveMutex *m, PyTime_t timeout, _PyLockFlags flags);
PyAPI_FUNC(void) _PyRecursiveMutex_Unlock(_PyRecursiveMutex *m);

extern int _PyRecursiveMutex_TryUnlock(_PyRecursiveMutex *m);

// A readers-writer (RW) lock. The lock supports multiple concurrent readers or
// a single writer. The lock is write-preferring: if a writer is waiting while
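For context: the two declarations added above are the whole new surface in this header, a timed, flag-aware acquire and an unlock that reports failure instead of aborting. A minimal sketch of how a caller inside the interpreter might pair them, assuming `pycore_lock.h` is reachable from the build; the helper name `borrow_recursively` is hypothetical and only for illustration:

```c
#include "pycore_lock.h"   // _PyRecursiveMutex, _PyLockFlags, PyLockStatus

// Hypothetical helper: acquire the recursive mutex with a timeout, run a
// critical section, then release it.  Returns 0 on success, -1 if the
// acquire timed out or was interrupted by a signal.
static int
borrow_recursively(_PyRecursiveMutex *m, PyTime_t timeout)
{
    PyLockStatus st = _PyRecursiveMutex_LockTimed(
        m, timeout, _PY_LOCK_HANDLE_SIGNALS | _PY_LOCK_DETACH);
    if (st != PY_LOCK_ACQUIRED) {
        // PY_LOCK_FAILURE (timed out) or PY_LOCK_INTR (signal)
        return -1;
    }
    /* ... critical section ... */
    // Unlike _PyRecursiveMutex_Unlock(), TryUnlock() reports a non-owning
    // caller instead of calling Py_FatalError(), which is what the RLock
    // code below needs in order to raise RuntimeError.
    if (_PyRecursiveMutex_TryUnlock(m) < 0) {
        return -1;   // not owned by the current thread
    }
    return 0;
}
```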
151 changes: 33 additions & 118 deletions Modules/_threadmodule.c
@@ -726,11 +726,6 @@ lock_dealloc(PyObject *op)
Py_DECREF(tp);
}

static inline PyLockStatus
acquire_timed(PyThread_type_lock lock, PyTime_t timeout)
{
return PyThread_acquire_lock_timed_with_retries(lock, timeout);
}

static int
lock_acquire_parse_args(PyObject *args, PyObject *kwds,
@@ -973,10 +968,7 @@ static PyType_Spec lock_type_spec = {

typedef struct {
PyObject_HEAD
PyThread_type_lock rlock_lock;
PyThread_ident_t rlock_owner;
unsigned long rlock_count;
PyObject *in_weakreflist;
_PyRecursiveMutex lock;
} rlockobject;

static int
@@ -992,59 +984,26 @@ rlock_dealloc(PyObject *op)
{
rlockobject *self = (rlockobject*)op;
PyObject_GC_UnTrack(self);
if (self->in_weakreflist != NULL)
PyObject_ClearWeakRefs((PyObject *) self);
/* self->rlock_lock can be NULL if PyThread_allocate_lock() failed
in rlock_new() */
if (self->rlock_lock != NULL) {
/* Unlock the lock so it's safe to free it */
if (self->rlock_count > 0)
PyThread_release_lock(self->rlock_lock);

PyThread_free_lock(self->rlock_lock);
}
PyObject_ClearWeakRefs((PyObject *) self);
PyTypeObject *tp = Py_TYPE(self);
tp->tp_free(self);
Py_DECREF(tp);
}

static bool
rlock_is_owned_by(rlockobject *self, PyThread_ident_t tid)
{
PyThread_ident_t owner_tid =
_Py_atomic_load_ullong_relaxed(&self->rlock_owner);
return owner_tid == tid && self->rlock_count > 0;
}

static PyObject *
rlock_acquire(PyObject *op, PyObject *args, PyObject *kwds)
{
rlockobject *self = (rlockobject*)op;
PyTime_t timeout;
PyThread_ident_t tid;
PyLockStatus r = PY_LOCK_ACQUIRED;

if (lock_acquire_parse_args(args, kwds, &timeout) < 0)
if (lock_acquire_parse_args(args, kwds, &timeout) < 0) {
return NULL;

tid = PyThread_get_thread_ident_ex();
if (rlock_is_owned_by(self, tid)) {
unsigned long count = self->rlock_count + 1;
if (count <= self->rlock_count) {
PyErr_SetString(PyExc_OverflowError,
"Internal lock count overflowed");
return NULL;
}
self->rlock_count = count;
Py_RETURN_TRUE;
}
r = acquire_timed(self->rlock_lock, timeout);
if (r == PY_LOCK_ACQUIRED) {
assert(self->rlock_count == 0);
_Py_atomic_store_ullong_relaxed(&self->rlock_owner, tid);
self->rlock_count = 1;
}
else if (r == PY_LOCK_INTR) {

PyLockStatus r = _PyRecursiveMutex_LockTimed(&self->lock, timeout,
_PY_LOCK_HANDLE_SIGNALS | _PY_LOCK_DETACH);
if (r == PY_LOCK_INTR) {
return NULL;
}

@@ -1078,17 +1037,12 @@ static PyObject *
rlock_release(PyObject *op, PyObject *Py_UNUSED(ignored))
{
rlockobject *self = (rlockobject*)op;
PyThread_ident_t tid = PyThread_get_thread_ident_ex();

if (!rlock_is_owned_by(self, tid)) {
if (_PyRecursiveMutex_TryUnlock(&self->lock) < 0) {
PyErr_SetString(PyExc_RuntimeError,
"cannot release un-acquired lock");
return NULL;
}
if (--self->rlock_count == 0) {
_Py_atomic_store_ullong_relaxed(&self->rlock_owner, 0);
PyThread_release_lock(self->rlock_lock);
}
Py_RETURN_NONE;
}

@@ -1116,25 +1070,15 @@ rlock_acquire_restore(PyObject *op, PyObject *args)
{
rlockobject *self = (rlockobject*)op;
PyThread_ident_t owner;
unsigned long count;
int r = 1;
Py_ssize_t count;

if (!PyArg_ParseTuple(args, "(k" Py_PARSE_THREAD_IDENT_T "):_acquire_restore",
if (!PyArg_ParseTuple(args, "(n" Py_PARSE_THREAD_IDENT_T "):_acquire_restore",
&count, &owner))
return NULL;

if (!PyThread_acquire_lock(self->rlock_lock, 0)) {
Py_BEGIN_ALLOW_THREADS
r = PyThread_acquire_lock(self->rlock_lock, 1);
Py_END_ALLOW_THREADS
}
if (!r) {
PyErr_SetString(ThreadError, "couldn't acquire lock");
return NULL;
}
assert(self->rlock_count == 0);
_Py_atomic_store_ullong_relaxed(&self->rlock_owner, owner);
self->rlock_count = count;
_PyRecursiveMutex_Lock(&self->lock);
_Py_atomic_store_ullong_relaxed(&self->lock.thread, owner);
self->lock.level = (size_t)count - 1;
Py_RETURN_NONE;
}

@@ -1148,21 +1092,18 @@ static PyObject *
rlock_release_save(PyObject *op, PyObject *Py_UNUSED(ignored))
{
rlockobject *self = (rlockobject*)op;
PyThread_ident_t owner;
unsigned long count;

if (self->rlock_count == 0) {
if (!_PyRecursiveMutex_IsLockedByCurrentThread(&self->lock)) {
PyErr_SetString(PyExc_RuntimeError,
"cannot release un-acquired lock");
return NULL;
}

owner = self->rlock_owner;
count = self->rlock_count;
self->rlock_count = 0;
_Py_atomic_store_ullong_relaxed(&self->rlock_owner, 0);
PyThread_release_lock(self->rlock_lock);
return Py_BuildValue("k" Py_PARSE_THREAD_IDENT_T, count, owner);
PyThread_ident_t owner = self->lock.thread;
Py_ssize_t count = self->lock.level + 1;
self->lock.level = 0; // ensure the unlock releases the lock
_PyRecursiveMutex_Unlock(&self->lock);
return Py_BuildValue("n" Py_PARSE_THREAD_IDENT_T, count, owner);
}

PyDoc_STRVAR(rlock_release_save_doc,
@@ -1175,10 +1116,10 @@ static PyObject *
rlock_recursion_count(PyObject *op, PyObject *Py_UNUSED(ignored))
{
rlockobject *self = (rlockobject*)op;
PyThread_ident_t tid = PyThread_get_thread_ident_ex();
PyThread_ident_t owner =
_Py_atomic_load_ullong_relaxed(&self->rlock_owner);
return PyLong_FromUnsignedLong(owner == tid ? self->rlock_count : 0UL);
if (_PyRecursiveMutex_IsLockedByCurrentThread(&self->lock)) {
return PyLong_FromSize_t(self->lock.level + 1);
}
return PyLong_FromLong(0);
}

PyDoc_STRVAR(rlock_recursion_count_doc,
@@ -1191,12 +1132,8 @@ static PyObject *
rlock_is_owned(PyObject *op, PyObject *Py_UNUSED(ignored))
{
rlockobject *self = (rlockobject*)op;
PyThread_ident_t tid = PyThread_get_thread_ident_ex();

if (rlock_is_owned_by(self, tid)) {
Py_RETURN_TRUE;
}
Py_RETURN_FALSE;
long owned = _PyRecursiveMutex_IsLockedByCurrentThread(&self->lock);
return PyBool_FromLong(owned);
}

PyDoc_STRVAR(rlock_is_owned_doc,
@@ -1212,45 +1149,29 @@ rlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (self == NULL) {
return NULL;
}
self->in_weakreflist = NULL;
self->rlock_owner = 0;
self->rlock_count = 0;

self->rlock_lock = PyThread_allocate_lock();
if (self->rlock_lock == NULL) {
Py_DECREF(self);
PyErr_SetString(ThreadError, "can't allocate lock");
return NULL;
}
self->lock = (_PyRecursiveMutex){0};
return (PyObject *) self;
}

static PyObject *
rlock_repr(PyObject *op)
{
rlockobject *self = (rlockobject*)op;
PyThread_ident_t owner =
_Py_atomic_load_ullong_relaxed(&self->rlock_owner);
PyThread_ident_t owner = self->lock.thread;
size_t count = self->lock.level + 1;
return PyUnicode_FromFormat(
"<%s %s object owner=%" PY_FORMAT_THREAD_IDENT_T " count=%lu at %p>",
self->rlock_count ? "locked" : "unlocked",
"<%s %s object owner=%" PY_FORMAT_THREAD_IDENT_T " count=%zu at %p>",
owner ? "locked" : "unlocked",
Py_TYPE(self)->tp_name, owner,
self->rlock_count, self);
count, self);
}


#ifdef HAVE_FORK
static PyObject *
rlock__at_fork_reinit(rlockobject *self, PyObject *Py_UNUSED(args))
{
if (_PyThread_at_fork_reinit(&self->rlock_lock) < 0) {
PyErr_SetString(ThreadError, "failed to reinitialize lock at fork");
return NULL;
}

self->rlock_owner = 0;
self->rlock_count = 0;

self->lock = (_PyRecursiveMutex){0};
Py_RETURN_NONE;
}
#endif /* HAVE_FORK */
@@ -1281,18 +1202,12 @@ static PyMethodDef rlock_methods[] = {
};


static PyMemberDef rlock_type_members[] = {
{"__weaklistoffset__", Py_T_PYSSIZET, offsetof(rlockobject, in_weakreflist), Py_READONLY},
{NULL},
};

static PyType_Slot rlock_type_slots[] = {
{Py_tp_dealloc, rlock_dealloc},
{Py_tp_repr, rlock_repr},
{Py_tp_methods, rlock_methods},
{Py_tp_alloc, PyType_GenericAlloc},
{Py_tp_new, rlock_new},
{Py_tp_members, rlock_type_members},
{Py_tp_traverse, rlock_traverse},
{0, 0},
};
@@ -1301,7 +1216,7 @@ static PyType_Spec rlock_type_spec = {
.name = "_thread.RLock",
.basicsize = sizeof(rlockobject),
.flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE),
Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_MANAGED_WEAKREF),
.slots = rlock_type_slots,
};

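The `_release_save`/`_acquire_restore` pair above encodes the Python-visible recursion count as `level + 1`, because `_PyRecursiveMutex.level` only counts the extra re-acquisitions beyond the first. A small sketch of that round trip in isolation, assuming the internal header is available; the `rlockstate` struct and both function names are hypothetical mirrors of the module code, not part of the PR:

```c
#include "pycore_lock.h"

// Hypothetical mirror of rlock_release_save()/rlock_acquire_restore().
typedef struct {
    PyThread_ident_t owner;
    Py_ssize_t count;        // Python-visible count: lock.level + 1
} rlockstate;

static rlockstate
release_save(_PyRecursiveMutex *m)
{
    rlockstate st = {
        .owner = m->thread,
        .count = (Py_ssize_t)m->level + 1,
    };
    m->level = 0;                  // ensure the unlock below releases fully
    _PyRecursiveMutex_Unlock(m);
    return st;
}

static void
acquire_restore(_PyRecursiveMutex *m, rlockstate st)
{
    _PyRecursiveMutex_Lock(m);     // blocks until the mutex is free again
    // Re-attribute ownership to the saved owner (in threading.Condition this
    // is always the thread that performed the save) and restore the depth.
    _Py_atomic_store_ullong_relaxed(&m->thread, st.owner);
    m->level = (size_t)st.count - 1;
}
```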
31 changes: 28 additions & 3 deletions Python/lock.c
@@ -377,21 +377,46 @@ _PyRecursiveMutex_Lock(_PyRecursiveMutex *m)
assert(m->level == 0);
}

PyLockStatus
_PyRecursiveMutex_LockTimed(_PyRecursiveMutex *m, PyTime_t timeout, _PyLockFlags flags)
{
PyThread_ident_t thread = PyThread_get_thread_ident_ex();
if (recursive_mutex_is_owned_by(m, thread)) {
m->level++;
return PY_LOCK_ACQUIRED;
}
PyLockStatus s = _PyMutex_LockTimed(&m->mutex, timeout, flags);
if (s == PY_LOCK_ACQUIRED) {
_Py_atomic_store_ullong_relaxed(&m->thread, thread);
assert(m->level == 0);
}
return s;
}

void
_PyRecursiveMutex_Unlock(_PyRecursiveMutex *m)
{
if (_PyRecursiveMutex_TryUnlock(m) < 0) {
Py_FatalError("unlocking a recursive mutex that is not "
"owned by the current thread");
}
}

int
_PyRecursiveMutex_TryUnlock(_PyRecursiveMutex *m)
{
PyThread_ident_t thread = PyThread_get_thread_ident_ex();
if (!recursive_mutex_is_owned_by(m, thread)) {
Py_FatalError("unlocking a recursive mutex that is not owned by the"
" current thread");
return -1;
}
if (m->level > 0) {
m->level--;
return;
return 0;
}
assert(m->level == 0);
_Py_atomic_store_ullong_relaxed(&m->thread, 0);
PyMutex_Unlock(&m->mutex);
return 0;
}

#define _Py_WRITE_LOCKED 1
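The invariant maintained by the two new functions above: a first successful acquire leaves `level == 0`, each re-entry bumps it, and `_PyRecursiveMutex_TryUnlock()` only clears the owner and releases the underlying `PyMutex` once `level` drops back to zero. A minimal sketch of that behavior, assuming a single thread with an attached thread state (required by `_PY_LOCK_DETACH`) and a negative timeout meaning "block indefinitely"; `reentrancy_demo` is a hypothetical name:

```c
#include <assert.h>
#include "pycore_lock.h"

// Hypothetical illustration of the level-counting invariant.
static void
reentrancy_demo(void)
{
    _PyRecursiveMutex m = {0};

    (void)_PyRecursiveMutex_LockTimed(&m, -1, _PY_LOCK_DETACH);  // first acquire: level == 0
    (void)_PyRecursiveMutex_LockTimed(&m, -1, _PY_LOCK_DETACH);  // re-entry: level == 1
    assert(m.level == 1);

    (void)_PyRecursiveMutex_TryUnlock(&m);  // level 1 -> 0, mutex stays held
    (void)_PyRecursiveMutex_TryUnlock(&m);  // level == 0: owner cleared, mutex released
    assert(m.thread == 0);
}
```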