bpo-40513: Per-interpreter gil_drop_request (GH-19927)
Move gil_drop_request member from _PyRuntimeState.ceval to
PyInterpreterState.ceval.
vstinner committed May 5, 2020
1 parent 4e01946 commit 0b1e330
Showing 4 changed files with 45 additions and 47 deletions.
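In short: the one runtime-wide flag becomes one flag per interpreter. As a rough sketch of the resulting access pattern (not part of the commit — it borrows the internal struct and field names from the diff below, needs Py_BUILD_CORE and the internal headers, and the helper name is made up), setting a drop request now only touches the interpreter's own _ceval_state:

/* Sketch only: mirrors SET_GIL_DROP_REQUEST() as it looks after this commit. */
#include "Python.h"
#include "pycore_interp.h"   /* PyInterpreterState, struct _ceval_state */

static void
request_gil_drop_sketch(PyInterpreterState *interp)
{
    struct _ceval_state *ceval = &interp->ceval;
    /* The flag now lives in PyInterpreterState.ceval rather than
       _PyRuntimeState.ceval, so each interpreter gets its own request. */
    _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1);
    _Py_atomic_store_relaxed(&ceval->eval_breaker, 1);   /* wake the eval loop */
}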
2 changes: 2 additions & 0 deletions Include/internal/pycore_interp.h
@@ -42,6 +42,8 @@ struct _ceval_state {
     /* This single variable consolidates all requests to break out of
        the fast path in the eval loop. */
     _Py_atomic_int eval_breaker;
+    /* Request for dropping the GIL */
+    _Py_atomic_int gil_drop_request;
     struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;
Expand Down
2 changes: 0 additions & 2 deletions Include/internal/pycore_runtime.h
@@ -15,8 +15,6 @@ extern "C" {

 struct _ceval_runtime_state {
     int recursion_limit;
-    /* Request for dropping the GIL */
-    _Py_atomic_int gil_drop_request;
     struct _gil_runtime_state gil;
 };

Expand Down
75 changes: 36 additions & 39 deletions Python/ceval.c
@@ -143,77 +143,70 @@ is_tstate_valid(PyThreadState *tstate)
    the GIL eventually anyway. */
 static inline void
 COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
-                     struct _ceval_runtime_state *ceval,
-                     struct _ceval_state *ceval2)
+                     struct _ceval_state *ceval)
 {
-    _Py_atomic_store_relaxed(&ceval2->eval_breaker,
+    _Py_atomic_store_relaxed(&ceval->eval_breaker,
         _Py_atomic_load_relaxed(&ceval->gil_drop_request)
-        | (_Py_atomic_load_relaxed(&ceval2->signals_pending)
+        | (_Py_atomic_load_relaxed(&ceval->signals_pending)
            && _Py_ThreadCanHandleSignals(interp))
-        | (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)
+        | (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)
            && _Py_ThreadCanHandlePendingCalls())
-        | ceval2->pending.async_exc);
+        | ceval->pending.async_exc);
 }


 static inline void
 SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _ceval_state *ceval = &interp->ceval;
     _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1);
-    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
+    _Py_atomic_store_relaxed(&ceval->eval_breaker, 1);
 }


 static inline void
 RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
+    struct _ceval_state *ceval = &interp->ceval;
     _Py_atomic_store_relaxed(&ceval->gil_drop_request, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }


 static inline void
 SIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }


 static inline void
 UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }


 static inline void
 SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->signals_pending, 1);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
     /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }


 static inline void
 UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    _Py_atomic_store_relaxed(&ceval2->signals_pending, 0);
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }


@@ -229,10 +222,9 @@ SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
 static inline void
 UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
 {
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    ceval2->pending.async_exc = 0;
-    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+    struct _ceval_state *ceval = &interp->ceval;
+    ceval->pending.async_exc = 0;
+    COMPUTE_EVAL_BREAKER(interp, ceval);
 }


@@ -357,17 +349,19 @@ PyEval_ReleaseLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
        in debug mode. */
-    drop_gil(&runtime->ceval, tstate);
+    drop_gil(&runtime->ceval, ceval2, tstate);
 }

 void
 _PyEval_ReleaseLock(PyThreadState *tstate)
 {
     struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
-    drop_gil(ceval, tstate);
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
+    drop_gil(ceval, ceval2, tstate);
 }

 void
@@ -393,7 +387,9 @@ PyEval_ReleaseThread(PyThreadState *tstate)
     if (new_tstate != tstate) {
         Py_FatalError("wrong thread state");
     }
-    drop_gil(&runtime->ceval, tstate);
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
+    drop_gil(ceval, ceval2, tstate);
 }

 #ifdef HAVE_FORK
@@ -439,13 +435,14 @@ PyThreadState *
 PyEval_SaveThread(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval = &runtime->ceval;

     PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     ensure_tstate_not_null(__func__, tstate);

+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _ceval_state *ceval2 = &tstate->interp->ceval;
     assert(gil_created(&ceval->gil));
-    drop_gil(ceval, tstate);
+    drop_gil(ceval, ceval2, tstate);
     return tstate;
 }

@@ -847,12 +844,12 @@ eval_frame_handle_pending(PyThreadState *tstate)
     }

     /* GIL drop request */
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
         /* Give another thread a chance */
         if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
             Py_FatalError("tstate mix-up");
         }
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);

         /* Other threads may run now */

13 changes: 7 additions & 6 deletions Python/ceval_gil.h
@@ -141,7 +141,8 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }

 static void
-drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
+drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
+         PyThreadState *tstate)
 {
     struct _gil_runtime_state *gil = &ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
@@ -163,7 +164,7 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
     MUTEX_UNLOCK(gil->mutex);

 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
@@ -226,6 +227,7 @@ take_gil(PyThreadState *tstate)
     assert(is_tstate_valid(tstate));
     PyInterpreterState *interp = tstate->interp;
     struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
+    struct _ceval_state *ceval2 = &interp->ceval;
     struct _gil_runtime_state *gil = &ceval->gil;

     /* Check that _PyEval_InitThreads() was called to create the lock */
@@ -289,12 +291,12 @@ take_gil(PyThreadState *tstate)
            in take_gil() while the main thread called
            wait_for_thread_shutdown() from Py_Finalize(). */
         MUTEX_UNLOCK(gil->mutex);
-        drop_gil(ceval, tstate);
+        drop_gil(ceval, ceval2, tstate);
         PyThread_exit_thread();
     }
     assert(is_tstate_valid(tstate));

-    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
         RESET_GIL_DROP_REQUEST(interp);
     }
     else {
@@ -303,8 +305,7 @@ take_gil(PyThreadState *tstate)
            handle signals.
            Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
-        struct _ceval_state *ceval2 = &interp->ceval;
-        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+        COMPUTE_EVAL_BREAKER(interp, ceval2);
     }

     /* Don't access tstate if the thread must exit */
