bpo-30860: Consolidate stateful runtime globals. (#2594)
* group the (stateful) runtime globals into various topical structs
* consolidate the topical structs under a single top-level _PyRuntimeState struct
* add a check-c-globals.py script that helps identify runtime globals

Other globals are excluded (see globals.txt and check-c-globals.py).
ericsnowcurrently committed Sep 6, 2017
1 parent 501b324 commit 76d5abc
Showing 40 changed files with 2,731 additions and 1,331 deletions.
4 changes: 4 additions & 0 deletions Include/Python.h
@@ -133,4 +133,8 @@
 #include "fileutils.h"
 #include "pyfpe.h"

+#ifdef Py_BUILD_CORE
+#include "internal/_Python.h"
+#endif
+
 #endif /* !Py_PYTHON_H */
7 changes: 6 additions & 1 deletion Include/ceval.h
@@ -93,7 +93,12 @@ PyAPI_FUNC(int) Py_GetRecursionLimit(void);
         PyThreadState_GET()->overflowed = 0; \
     } while(0)
 PyAPI_FUNC(int) _Py_CheckRecursiveCall(const char *where);
-PyAPI_DATA(int) _Py_CheckRecursionLimit;
+#ifdef Py_BUILD_CORE
+#define _Py_CheckRecursionLimit _PyRuntime.ceval.check_recursion_limit
+#else
+PyAPI_FUNC(int) _PyEval_CheckRecursionLimit(void);
+#define _Py_CheckRecursionLimit _PyEval_CheckRecursionLimit()
+#endif

 #ifdef USE_STACKCHECK
 /* With USE_STACKCHECK, we artificially decrement the recursion limit in order
16 changes: 16 additions & 0 deletions Include/internal/_Python.h
@@ -0,0 +1,16 @@
#ifndef _Py_PYTHON_H
#define _Py_PYTHON_H
/* Since this is a "meta-include" file, no #ifdef __cplusplus / extern "C" { */

/* Include all internal Python header files */

#ifndef Py_BUILD_CORE
#error "Internal headers are not available externally."
#endif

#include "_mem.h"
#include "_ceval.h"
#include "_warnings.h"
#include "_pystate.h"

#endif /* !_Py_PYTHON_H */
71 changes: 71 additions & 0 deletions Include/internal/_ceval.h
@@ -0,0 +1,71 @@
#ifndef _Py_CEVAL_H
#define _Py_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif

#include "ceval.h"
#include "compile.h"
#include "pyatomic.h"

#ifdef WITH_THREAD
#include "pythread.h"
#endif

struct _pending_calls {
unsigned long main_thread;
#ifdef WITH_THREAD
PyThread_type_lock lock;
/* Request for running pending calls. */
_Py_atomic_int calls_to_do;
/* Request for looking at the `async_exc` field of the current
thread state.
Guarded by the GIL. */
int async_exc;
#define NPENDINGCALLS 32
struct {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
int first;
int last;
#else /* ! WITH_THREAD */
_Py_atomic_int calls_to_do;
#define NPENDINGCALLS 32
struct {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
volatile int first;
volatile int last;
#endif /* WITH_THREAD */
};

#include "_gil.h"

struct _ceval_runtime_state {
int recursion_limit;
int check_recursion_limit;
/* Records whether tracing is on for any thread. Counts the number
of threads for which tstate->c_tracefunc is non-NULL, so if the
value is 0, we know we don't have to check this thread's
c_tracefunc. This speeds up the if statement in
PyEval_EvalFrameEx() after fast_next_opcode. */
int tracing_possible;
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
#ifdef WITH_THREAD
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
#endif
struct _pending_calls pending;
struct _gil_runtime_state gil;
};

PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);

#ifdef __cplusplus
}
#endif
#endif /* !_Py_CEVAL_H */
91 changes: 91 additions & 0 deletions Include/internal/_condvar.h
@@ -0,0 +1,91 @@
#ifndef _CONDVAR_H_
#define _CONDVAR_H_

#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
   is not present in unistd.h. But they can still be implemented as an
   external library (e.g. GNU Pth in pthread emulation). */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif

#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR

#include <pthread.h>

#define PyMUTEX_T pthread_mutex_t
#define PyCOND_T pthread_cond_t

#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work on XP and later, plus
* native support on Vista and onwards.
*/
#define Py_HAVE_CONDVAR

/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif

/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif

#if _PY_EMULATED_WIN_CV

typedef CRITICAL_SECTION PyMUTEX_T;

/* The ConditionVariable object. From XP onwards it is easily emulated
with a Semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a semaphore rather than an auto-reset event: although an
auto-reset event might appear to solve the lost-wakeup bug (the race
between releasing the outer lock and waiting), since it maintains
state even when no wait has happened yet, there is still a lost
wakeup if more than one thread is interrupted at the critical point.
A semaphore solves that, because its state is a count, not a Boolean.
Because it is OK to signal a condition variable with no one waiting,
we need to keep track of the number of waiting threads; otherwise the
semaphore's count could rise without bound. This also helps reduce
the number of "spurious wakeups" that would otherwise happen.
*/

typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;

#else /* !_PY_EMULATED_WIN_CV */

/* Use native Win7 primitives if build target is Win7 or higher */

/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;

typedef CONDITION_VARIABLE PyCOND_T;

#endif /* _PY_EMULATED_WIN_CV */

#endif /* _POSIX_THREADS, NT_THREADS */

#endif /* _CONDVAR_H_ */
48 changes: 48 additions & 0 deletions Include/internal/_gil.h
@@ -0,0 +1,48 @@
#ifndef _Py_GIL_H
#define _Py_GIL_H
#ifdef __cplusplus
extern "C" {
#endif

#include "pyatomic.h"

#include "internal/_condvar.h"
#ifndef Py_HAVE_CONDVAR
#error You need either a POSIX-compatible or a Windows system!
#endif

/* Enable if you want to force the switching of threads at least
every `interval`. */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING

struct _gil_runtime_state {
/* microseconds (the Python API uses seconds, though) */
unsigned long interval;
/* Last PyThreadState holding / having held the GIL. This helps us
know whether anyone else was scheduled after we dropped the GIL. */
_Py_atomic_address last_holder;
/* Whether the GIL is already taken (-1 if uninitialized). This is
atomic because it can be read without any lock taken in ceval.c. */
_Py_atomic_int locked;
/* Number of GIL switches since the beginning. */
unsigned long switch_number;
#ifdef WITH_THREAD
/* This condition variable allows one or several threads to wait
until the GIL is released. In addition, the mutex also protects
the above variables. */
PyCOND_T cond;
PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
a GIL-awaiting thread to be scheduled and take the GIL. */
PyCOND_T switch_cond;
PyMUTEX_T switch_mutex;
#endif
#endif /* WITH_THREAD */
};

#ifdef __cplusplus
}
#endif
#endif /* !_Py_GIL_H */
