diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h
index b9f2d7d1758537..f087879d0c103c 100644
--- a/Include/internal/pycore_ceval.h
+++ b/Include/internal/pycore_ceval.h
@@ -11,6 +11,19 @@ extern "C" {
 #include "pycore_atomic.h"
 #include "pythread.h"
 
+struct _pending_call;
+
+struct _pending_call {
+    int (*func)(void *);
+    void *arg;
+    struct _pending_call *next;
+};
+
+// We technically do not need this limit around any longer since we
+// moved from a circular queue to a linked list.  However, having a
+// size limit is still a good idea so we keep the one we already had.
+#define NPENDINGCALLS 32
+
 struct _pending_calls {
     unsigned long main_thread;
     PyThread_type_lock lock;
@@ -20,13 +33,9 @@ struct _pending_calls {
        thread state. Guarded by the GIL. */
     int async_exc;
-#define NPENDINGCALLS 32
-    struct {
-        int (*func)(void *);
-        void *arg;
-    } calls[NPENDINGCALLS];
-    int first;
-    int last;
+    int ncalls;
+    struct _pending_call *head;
+    struct _pending_call *last;
 };
 
 #include "pycore_gil.h"
 
diff --git a/Misc/NEWS.d/next/Core and Builtins/2018-12-11-15-12-03.bpo-35466.5xu737.rst b/Misc/NEWS.d/next/Core and Builtins/2018-12-11-15-12-03.bpo-35466.5xu737.rst
new file mode 100644
index 00000000000000..43ae1bf3ce1344
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2018-12-11-15-12-03.bpo-35466.5xu737.rst
@@ -0,0 +1 @@
+Simplify the ceval pending calls list by using a linked list.
diff --git a/Python/ceval.c b/Python/ceval.c
index 3e82ceb952510e..8654915752b0e9 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -322,6 +322,48 @@ _PyEval_SignalReceived(void)
     SIGNAL_PENDING_SIGNALS();
 }
 
+static int
+_add_pending_call(int (*func)(void *), void *arg)
+{
+    // TODO: Drop the limit?
+    if (_PyRuntime.ceval.pending.ncalls == NPENDINGCALLS) {
+        return -1;  /* Queue full */
+    }
+
+    struct _pending_call *call = PyMem_RawMalloc(sizeof(struct _pending_call));
+    if (call == NULL) {
+        return -1;
+    }
+    call->func = func;
+    call->arg = arg;
+    call->next = NULL;
+
+    if (_PyRuntime.ceval.pending.head == NULL) {
+        _PyRuntime.ceval.pending.head = call;
+    }
+    else {
+        _PyRuntime.ceval.pending.last->next = call;
+    }
+    _PyRuntime.ceval.pending.last = call;
+    _PyRuntime.ceval.pending.ncalls++;
+    return 0;
+}
+
+static void
+_pop_pending_call(int (**func)(void *), void **arg)
+{
+    struct _pending_call *call = _PyRuntime.ceval.pending.head;
+    if (call == NULL) {
+        return;  /* Queue empty */
+    }
+    _PyRuntime.ceval.pending.head = call->next;
+    _PyRuntime.ceval.pending.ncalls--;
+
+    *func = call->func;
+    *arg = call->arg;
+    PyMem_RawFree(call);
+}
+
 /* This implementation is thread-safe.  It allows
    scheduling to be made from any thread, and even from an executing
    callback.
@@ -330,7 +372,6 @@ _PyEval_SignalReceived(void)
 int
 Py_AddPendingCall(int (*func)(void *), void *arg)
 {
-    int i, j, result=0;
     PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
 
     /* try a few times for the lock.  Since this mechanism is used
@@ -345,6 +386,7 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
      * this function is called before any bytecode evaluation takes place.
      */
     if (lock != NULL) {
+        int i;
         for (i = 0; i<100; i++) {
             if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                 break;
@@ -353,20 +395,13 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
             return -1;
     }
 
-    i = _PyRuntime.ceval.pending.last;
-    j = (i + 1) % NPENDINGCALLS;
-    if (j == _PyRuntime.ceval.pending.first) {
-        result = -1; /* Queue full */
-    } else {
-        _PyRuntime.ceval.pending.calls[i].func = func;
-        _PyRuntime.ceval.pending.calls[i].arg = arg;
-        _PyRuntime.ceval.pending.last = j;
-    }
+    int res = _add_pending_call(func, arg);
+
     /* signal main loop */
     SIGNAL_PENDING_CALLS();
     if (lock != NULL)
         PyThread_release_lock(lock);
-    return result;
+    return res;
 }
 
 static int
@@ -420,24 +455,18 @@ make_pending_calls(void)
 
     /* perform a bounded number of calls, in case of recursion */
     for (int i=0; i
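For reference, the queue logic in the patch can be modelled outside of CPython. The sketch below is a minimal standalone version of the same bounded FIFO idea, under a few assumptions: it uses plain malloc/free instead of PyMem_RawMalloc, does no locking or main-loop signalling, and the names pending_queue, pending_push and pending_pop are illustrative only, not CPython API. Unlike _pop_pending_call in the patch, the sketch also clears the tail pointer when the queue becomes empty; the patch instead relies on the head == NULL check in the add path.

/* Standalone model of the bounded FIFO used by the patch above.
 * Assumptions: plain malloc/free instead of PyMem_RawMalloc, no GIL or
 * lock handling, and the queue lives in a local struct rather than in
 * _PyRuntime.ceval.pending.  Names are illustrative, not CPython API. */
#include <stdio.h>
#include <stdlib.h>

#define NPENDINGCALLS 32

struct pending_call {
    int (*func)(void *);
    void *arg;
    struct pending_call *next;
};

struct pending_queue {
    int ncalls;
    struct pending_call *head;
    struct pending_call *last;
};

/* Append at the tail; fail when the size limit is reached or on OOM. */
static int
pending_push(struct pending_queue *q, int (*func)(void *), void *arg)
{
    if (q->ncalls == NPENDINGCALLS) {
        return -1;  /* queue full */
    }
    struct pending_call *call = malloc(sizeof(*call));
    if (call == NULL) {
        return -1;
    }
    call->func = func;
    call->arg = arg;
    call->next = NULL;
    if (q->head == NULL) {
        q->head = call;
    }
    else {
        q->last->next = call;
    }
    q->last = call;
    q->ncalls++;
    return 0;
}

/* Pop from the head (FIFO order); fills *func/*arg, or returns -1 if empty. */
static int
pending_pop(struct pending_queue *q, int (**func)(void *), void **arg)
{
    struct pending_call *call = q->head;
    if (call == NULL) {
        return -1;  /* queue empty */
    }
    q->head = call->next;
    if (q->head == NULL) {
        q->last = NULL;  /* extra tidiness; the patch relies on the head check instead */
    }
    q->ncalls--;
    *func = call->func;
    *arg = call->arg;
    free(call);
    return 0;
}

static int
print_arg(void *arg)
{
    printf("pending call: %s\n", (const char *)arg);
    return 0;
}

int
main(void)
{
    struct pending_queue q = {0, NULL, NULL};
    char first[] = "first";
    char second[] = "second";

    pending_push(&q, print_arg, first);
    pending_push(&q, print_arg, second);

    int (*func)(void *);
    void *arg;
    while (pending_pop(&q, &func, &arg) == 0) {
        func(arg);  /* callbacks run in the order they were added */
    }
    return 0;
}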