Commit 76d5abc8 authored by Eric Snow, committed by GitHub

bpo-30860: Consolidate stateful runtime globals. (#2594)

* group the (stateful) runtime globals into various topical structs
* consolidate the topical structs under a single top-level _PyRuntimeState struct
* add a check-c-globals.py script that helps identify runtime globals

Other globals are excluded (see globals.txt and check-c-globals.py).
parent 501b324d
......@@ -133,4 +133,8 @@
#include "fileutils.h"
#include "pyfpe.h"
#ifdef Py_BUILD_CORE
#include "internal/_Python.h"
#endif
#endif /* !Py_PYTHON_H */
......@@ -93,7 +93,12 @@ PyAPI_FUNC(int) Py_GetRecursionLimit(void);
PyThreadState_GET()->overflowed = 0; \
} while(0)
PyAPI_FUNC(int) _Py_CheckRecursiveCall(const char *where);
PyAPI_DATA(int) _Py_CheckRecursionLimit;
#ifdef Py_BUILD_CORE
#define _Py_CheckRecursionLimit _PyRuntime.ceval.check_recursion_limit
#else
PyAPI_FUNC(int) _PyEval_CheckRecursionLimit(void);
#define _Py_CheckRecursionLimit _PyEval_CheckRecursionLimit()
#endif
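The indirection above keeps call sites identical in both build modes. A hedged sketch (illustrative, not the committed code) of how the recursion-check machinery can use the name, assuming `tstate` is the current PyThreadState:

    /* Sketch: the same expression works in a core build (a direct read
       of _PyRuntime.ceval.check_recursion_limit) and in an extension
       build (a call to _PyEval_CheckRecursionLimit()). */
    if (++tstate->recursion_depth > _Py_CheckRecursionLimit) {
        if (_Py_CheckRecursiveCall(" in example") < 0) {
            /* recursion limit hit; an exception has been set */
        }
    }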
#ifdef USE_STACKCHECK
/* With USE_STACKCHECK, we artificially decrement the recursion limit in order
......
#ifndef _Py_PYTHON_H
#define _Py_PYTHON_H
/* Since this is a "meta-include" file, no #ifdef __cplusplus / extern "C" { */
/* Include all internal Python header files */
#ifndef Py_BUILD_CORE
#error "Internal headers are not available externally."
#endif
#include "_mem.h"
#include "_ceval.h"
#include "_warnings.h"
#include "_pystate.h"
#endif /* !_Py_PYTHON_H */
#ifndef _Py_CEVAL_H
#define _Py_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "ceval.h"
#include "compile.h"
#include "pyatomic.h"
#ifdef WITH_THREAD
#include "pythread.h"
#endif
struct _pending_calls {
unsigned long main_thread;
#ifdef WITH_THREAD
PyThread_type_lock lock;
/* Request for running pending calls. */
_Py_atomic_int calls_to_do;
/* Request for looking at the `async_exc` field of the current
thread state.
Guarded by the GIL. */
int async_exc;
#define NPENDINGCALLS 32
struct {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
int first;
int last;
#else /* ! WITH_THREAD */
_Py_atomic_int calls_to_do;
#define NPENDINGCALLS 32
struct {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
volatile int first;
volatile int last;
#endif /* WITH_THREAD */
};
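A minimal sketch of how a call could be queued into this ring buffer, modeled loosely on Py_AddPendingCall; locking and the eval-breaker signal are elided, and enqueue_pending_call is a hypothetical helper, not part of the commit:

    static int
    enqueue_pending_call(struct _pending_calls *pending,
                         int (*func)(void *), void *arg)
    {
        int i = pending->last;
        int j = (i + 1) % NPENDINGCALLS;
        if (j == pending->first) {
            return -1;  /* queue full */
        }
        pending->calls[i].func = func;
        pending->calls[i].arg = arg;
        pending->last = j;
        /* A real caller would also set calls_to_do so that the eval
           loop notices the request. */
        return 0;
    }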
#include "_gil.h"
struct _ceval_runtime_state {
int recursion_limit;
int check_recursion_limit;
/* Records whether tracing is on for any thread. Counts the number
of threads for which tstate->c_tracefunc is non-NULL, so if the
value is 0, we know we don't have to check this thread's
c_tracefunc. This speeds up the if statement in
PyEval_EvalFrameEx() after fast_next_opcode. */
int tracing_possible;
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
#ifdef WITH_THREAD
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
#endif
struct _pending_calls pending;
struct _gil_runtime_state gil;
};
PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);
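The eval_breaker flag above is what the interpreter's hot loop polls; a hedged sketch of that check (the committed code lives in ceval.c after fast_next_opcode):

    /* Sketch: one relaxed atomic load on the fast path stands in for
       several separate flag checks. */
    if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
        /* slow path: run pending calls, honor gil_drop_request,
           deliver async exceptions, ... */
    }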
#ifdef __cplusplus
}
#endif
#endif /* !_Py_CEVAL_H */
#ifndef _CONDVAR_H_
#define _CONDVAR_H_
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
   is not present in unistd.h. But they can still be implemented as an external
library (e.g. gnu pth in pthread emulation) */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR
#include <pthread.h>
#define PyMUTEX_T pthread_mutex_t
#define PyCOND_T pthread_cond_t
#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work with XP and later, plus
* example native support on VISTA and onwards.
*/
#define Py_HAVE_CONDVAR
/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif
/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
#if _PY_EMULATED_WIN_CV
typedef CRITICAL_SECTION PyMUTEX_T;
/* The ConditionVariable object. From XP onwards it is easily emulated
with a Semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event, because although
an auto-reset event might appear to solve the lost-wakeup bug (race
condition between releasing the outer lock and waiting) because it
maintains state even though a wait hasn't happened, there is still
a lost wakeup problem if more than one thread is interrupted in the
critical place. A semaphore solves that, because its state is
counted, not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
*/
typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;
#else /* !_PY_EMULATED_WIN_CV */
/* Use native Win7 primitives if build target is Win7 or higher */
/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;
typedef CONDITION_VARIABLE PyCOND_T;
#endif /* _PY_EMULATED_WIN_CV */
#endif /* _POSIX_THREADS, NT_THREADS */
#endif /* _CONDVAR_H_ */
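For context, the PyMUTEX_T/PyCOND_T pair selected above supports the classic monitor pattern. A sketch assuming the PyMUTEX_LOCK/PyCOND_WAIT/PyMUTEX_UNLOCK helpers defined in Python/condvar.h, with predicate, mutex, and cond as illustrative stand-ins:

    PyMUTEX_LOCK(&mutex);
    while (!predicate) {
        PyCOND_WAIT(&cond, &mutex);  /* atomically unlocks, waits, relocks */
    }
    /* predicate now holds; do the guarded work */
    PyMUTEX_UNLOCK(&mutex);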
#ifndef _Py_GIL_H
#define _Py_GIL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pyatomic.h"
#include "internal/_condvar.h"
#ifndef Py_HAVE_CONDVAR
#error You need either a POSIX-compatible or a Windows system!
#endif
/* Enable if you want to force the switching of threads at least
every `interval`. */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING
struct _gil_runtime_state {
/* microseconds (the Python API uses seconds, though) */
unsigned long interval;
/* Last PyThreadState holding / having held the GIL. This helps us
know whether anyone else was scheduled after we dropped the GIL. */
_Py_atomic_address last_holder;
/* Whether the GIL is already taken (-1 if uninitialized). This is
atomic because it can be read without any lock taken in ceval.c. */
_Py_atomic_int locked;
/* Number of GIL switches since the beginning. */
unsigned long switch_number;
#ifdef WITH_THREAD
/* This condition variable allows one or several threads to wait
until the GIL is released. In addition, the mutex also protects
the above variables. */
PyCOND_T cond;
PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
a GIL-awaiting thread to be scheduled and take the GIL. */
PyCOND_T switch_cond;
PyMUTEX_T switch_mutex;
#endif
#endif /* WITH_THREAD */
};
#ifdef __cplusplus
}
#endif
#endif /* !_Py_GIL_H */
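A minimal sketch of how these fields cooperate when a thread releases the GIL, modeled on drop_gil in Python/ceval_gil.h; FORCE_SWITCHING handling and error checks are omitted, and drop_gil_sketch is a hypothetical name:

    static void
    drop_gil_sketch(struct _gil_runtime_state *gil)
    {
        PyMUTEX_LOCK(&gil->mutex);          /* guards the fields below */
        _Py_atomic_store_relaxed(&gil->locked, 0);
        PyCOND_SIGNAL(&gil->cond);          /* wake one thread in take_gil */
        PyMUTEX_UNLOCK(&gil->mutex);
    }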
#ifndef _Py_MEM_H
#define _Py_MEM_H
#ifdef __cplusplus
extern "C" {
#endif
#include "objimpl.h"
#include "pymem.h"
#ifdef WITH_PYMALLOC
#include "_pymalloc.h"
#endif
/* Low-level memory runtime state */
struct _pymem_runtime_state {
struct _allocator_runtime_state {
PyMemAllocatorEx mem;
PyMemAllocatorEx obj;
PyMemAllocatorEx raw;
} allocators;
#ifdef WITH_PYMALLOC
/* Array of objects used to track chunks of memory (arenas). */
struct arena_object* arenas;
/* The head of the singly-linked, NULL-terminated list of available
arena_objects. */
struct arena_object* unused_arena_objects;
/* The head of the doubly-linked, NULL-terminated at each end,
list of arena_objects associated with arenas that have pools
available. */
struct arena_object* usable_arenas;
/* Number of slots currently allocated in the `arenas` vector. */
unsigned int maxarenas;
/* Number of arenas allocated that haven't been free()'d. */
size_t narenas_currently_allocated;
/* High water mark (max value ever seen) for
* narenas_currently_allocated. */
size_t narenas_highwater;
/* Total number of times malloc() called to allocate an arena. */
size_t ntimes_arena_allocated;
poolp usedpools[MAX_POOLS];
Py_ssize_t num_allocated_blocks;
size_t serialno; /* incremented on each debug {m,re}alloc */
#endif /* WITH_PYMALLOC */
};
PyAPI_FUNC(void) _PyMem_Initialize(struct _pymem_runtime_state *);
/* High-level memory runtime state */
struct _pyobj_runtime_state {
PyObjectArenaAllocator allocator_arenas;
};
PyAPI_FUNC(void) _PyObject_Initialize(struct _pyobj_runtime_state *);
/* GC runtime state */
/* If we change this, we need to change the default value in the
signature of gc.collect. */
#define NUM_GENERATIONS 3
/*
NOTE: about the counting of long-lived objects.
To limit the cost of garbage collection, there are two strategies:
- make each collection faster, e.g. by scanning fewer objects
- do fewer collections
This heuristic is about the latter strategy.
In addition to the various configurable thresholds, we only trigger a
full collection if the ratio
long_lived_pending / long_lived_total
is above a given value (hardwired to 25%).
The reason is that, while "non-full" collections (i.e., collections of
the young and middle generations) will always examine roughly the same
number of objects -- determined by the aforementioned thresholds --,
the cost of a full collection is proportional to the total number of
long-lived objects, which is virtually unbounded.
Indeed, it has been remarked that doing a full collection every
<constant number> of object creations entails a dramatic performance
degradation in workloads that consist of creating and storing lots of
long-lived objects (e.g. building a large list of GC-tracked objects would
show quadratic performance, instead of linear as expected: see issue #4074).
Using the above ratio, instead, yields amortized linear performance in
the total number of objects (the effect of which can be summarized
thusly: "each full garbage collection is more and more costly as the
number of objects grows, but we do fewer and fewer of them").
This heuristic was suggested by Martin von Löwis on python-dev in
June 2008. His original analysis and proposal can be found at:
http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
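A hedged sketch of the trigger just described, using the long_lived_* fields of the _gc_runtime_state struct defined below (the committed check lives in the gc module; the 25% ratio is expressed without floating point):

    struct _gc_runtime_state *gc = &_PyRuntime.gc;
    /* Only escalate to a full (oldest-generation) collection once
       enough survivors of non-full collections are pending. */
    if (gc->long_lived_pending > gc->long_lived_total / 4) {
        /* collect the oldest generation as well */
    }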
/*
NOTE: about untracking of mutable objects.
Certain types of container cannot participate in a reference cycle, and
so do not need to be tracked by the garbage collector. Untracking these
objects reduces the cost of garbage collections. However, determining
which objects may be untracked is not free, and the costs must be
weighed against the benefits for garbage collection.
There are two possible strategies for when to untrack a container:
i) When the container is created.
ii) When the container is examined by the garbage collector.
Tuples containing only immutable objects (integers, strings etc, and
recursively, tuples of immutable objects) do not need to be tracked.
The interpreter creates a large number of tuples, many of which will
not survive until garbage collection. It is therefore not worthwhile
to untrack eligible tuples at creation time.
Instead, all tuples except the empty tuple are tracked when created.
During garbage collection it is determined whether any surviving tuples
can be untracked. A tuple can be untracked if all of its contents are
already not tracked. Tuples are examined for untracking in all garbage
collection cycles. It may take more than one cycle to untrack a tuple.
Dictionaries containing only immutable objects also do not need to be
tracked. Dictionaries are untracked when created. If a tracked item is
inserted into a dictionary (either as a key or value), the dictionary
becomes tracked. During a full garbage collection (all generations),
the collector will untrack any dictionaries whose contents are not
tracked.
The module provides the python function is_tracked(obj), which returns
the CURRENT tracking status of the object. Subsequent garbage
collections may change the tracking status of the object.
Untracking of certain containers was introduced in issue #4688, and
the algorithm was refined in response to issue #14775.
*/
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
/* Running stats per generation */
struct gc_generation_stats {
/* total number of collections */
Py_ssize_t collections;
/* total number of collected objects */
Py_ssize_t collected;
/* total number of uncollectable objects (put into gc.garbage) */
Py_ssize_t uncollectable;
};
struct _gc_runtime_state {
/* List of objects that still need to be cleaned up, singly linked
* via their gc headers' gc_prev pointers. */
PyObject *trash_delete_later;
/* Current call-stack depth of tp_dealloc calls. */
int trash_delete_nesting;
int enabled;
int debug;
/* linked lists of container objects */
struct gc_generation generations[NUM_GENERATIONS];
PyGC_Head *generation0;
struct gc_generation_stats generation_stats[NUM_GENERATIONS];
/* true if we are currently running the collector */
int collecting;
/* list of uncollectable objects */
PyObject *garbage;
/* a list of callbacks to be invoked when collection is performed */
PyObject *callbacks;
/* This is the number of objects that survived the last full
collection. It approximates the number of long lived objects
tracked by the GC.
(by "full collection", we mean a collection of the oldest
generation). */
Py_ssize_t long_lived_total;
/* This is the number of objects that survived all "non-full"
collections and are waiting to undergo a full collection for
the first time. */
Py_ssize_t long_lived_pending;
};
PyAPI_FUNC(void) _PyGC_Initialize(struct _gc_runtime_state *);
#define _PyGC_generation0 _PyRuntime.gc.generation0
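And a sketch of how the per-generation count/threshold pairs drive automatic collections, loosely modeled on collect_generations in Modules/gcmodule.c (the oldest eligible generation wins, and collecting it also sweeps all younger generations; the long-lived ratio check above additionally gates the oldest generation):

    for (int i = NUM_GENERATIONS - 1; i >= 0; i--) {
        if (_PyRuntime.gc.generations[i].count >
                _PyRuntime.gc.generations[i].threshold) {
            /* collect generation i (and, implicitly, 0..i-1) */
            break;
        }
    }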
#ifdef __cplusplus
}
#endif
#endif /* !_Py_MEM_H */
#ifndef _Py_PYSTATE_H
#define _Py_PYSTATE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pystate.h"
#include "pyatomic.h"
#ifdef WITH_THREAD
#include "pythread.h"
#endif
#include "_mem.h"
#include "_ceval.h"
#include "_warnings.h"
/* GIL state */
struct _gilstate_runtime_state {
int check_enabled;
/* Assuming the current thread holds the GIL, this is the
PyThreadState for the current thread. */
_Py_atomic_address tstate_current;
PyThreadFrameGetter getframe;
#ifdef WITH_THREAD
/* The single PyInterpreterState used by this process'
GILState implementation
*/
/* TODO: Given interp_main, it may be possible to kill this ref */
PyInterpreterState *autoInterpreterState;
int autoTLSkey;
#endif /* WITH_THREAD */
};
/* hook for PyEval_GetFrame(), requested for Psyco */
#define _PyThreadState_GetFrame _PyRuntime.gilstate.getframe
/* Issue #26558: Flag to disable PyGILState_Check().
If set to non-zero, PyGILState_Check() always returns 1. */
#define _PyGILState_check_enabled _PyRuntime.gilstate.check_enabled
/* Full Python runtime state */
typedef struct pyruntimestate {
int initialized;
int core_initialized;
PyThreadState *finalizing;
struct pyinterpreters {
#ifdef WITH_THREAD
PyThread_type_lock mutex;
#endif
PyInterpreterState *head;
PyInterpreterState *main;
/* next_id is an auto-numbered sequence of small
integers. It gets initialized in _PyInterpreterState_Init(),
which is called in Py_Initialize(), and used in
PyInterpreterState_New(). A negative interpreter ID
indicates an error occurred. The main interpreter will
always have an ID of 0. Overflow results in a RuntimeError.
If that becomes a problem later then we can adjust, e.g. by
using a Python int. */
int64_t next_id;
} interpreters;
#define NEXITFUNCS 32
void (*exitfuncs[NEXITFUNCS])(void);
int nexitfuncs;
void (*pyexitfunc)(void);
struct _pyobj_runtime_state obj;
struct _gc_runtime_state gc;
struct _pymem_runtime_state mem;
struct _warnings_runtime_state warnings;
struct _ceval_runtime_state ceval;
struct _gilstate_runtime_state gilstate;
// XXX Consolidate globals found via the check-c-globals script.
} _PyRuntimeState;
PyAPI_DATA(_PyRuntimeState) _PyRuntime;
PyAPI_FUNC(void) _PyRuntimeState_Init(_PyRuntimeState *);
PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *);
PyAPI_FUNC(void) _PyInterpreterState_Enable(_PyRuntimeState *);
#ifdef __cplusplus
}
#endif
#endif /* !_Py_PYSTATE_H */
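Taken together, the consolidation means formerly scattered globals are all reachable from a single root. An illustrative (not committed) access pattern:

    _PyRuntimeState *rt = &_PyRuntime;
    if (rt->finalizing != NULL) {
        /* the runtime is shutting down */
    }
    PyInterpreterState *main_interp = rt->interpreters.main;
    int limit = rt->ceval.recursion_limit;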
#ifndef _Py_WARNINGS_H
#define _Py_WARNINGS_H
#ifdef __cplusplus
extern "C" {
#endif
#include "object.h"
struct _warnings_runtime_state {
/* Both 'filters' and 'onceregistry' can be set in warnings.py;
get_warnings_attr() will reset these variables accordingly. */
PyObject *filters; /* List */
PyObject *once_registry; /* Dict */
PyObject *default_action; /* String */
long filters_version;
};
#ifdef __cplusplus
}
#endif
#endif /* !_Py_WARNINGS_H */
......@@ -1038,8 +1038,6 @@ with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
Kept for binary compatibility of extensions using the stable ABI. */
PyAPI_FUNC(void) _PyTrash_deposit_object(PyObject*);
PyAPI_FUNC(void) _PyTrash_destroy_chain(void);
PyAPI_DATA(int) _PyTrash_delete_nesting;
PyAPI_DATA(PyObject *) _PyTrash_delete_later;
#endif /* !Py_LIMITED_API */
/* The new thread-safe private API, invoked by the macros below. */
......
......@@ -119,7 +119,10 @@ PyAPI_FUNC(void) _PyType_Fini(void);
PyAPI_FUNC(void) _Py_HashRandomization_Fini(void);
PyAPI_FUNC(void) PyAsyncGen_Fini(void);
PyAPI_DATA(PyThreadState *) _Py_Finalizing;
#define _Py_IS_FINALIZING() \
(_PyRuntime.finalizing != NULL)
#define _Py_CURRENTLY_FINALIZING(tstate) \
(_PyRuntime.finalizing == tstate)
#endif
/* Signals */
......
......@@ -29,9 +29,10 @@ typedef struct {
int use_hash_seed;
unsigned long hash_seed;
int _disable_importlib; /* Needed by freeze_importlib */
char *allocator;
} _PyCoreConfig;
#define _PyCoreConfig_INIT {0, -1, 0, 0}
#define _PyCoreConfig_INIT {0, -1, 0, 0, NULL}
/* Placeholders while working on the new configuration API
*
......@@ -57,6 +58,19 @@ typedef struct _is {
PyObject *builtins;
PyObject *importlib;
/* Used in Python/sysmodule.c. */
int check_interval;
PyObject *warnoptions;
PyObject *xoptions;
/* Used in Modules/_threadmodule.c. */
long num_threads;
/* Support for runtime thread stack size tuning.
A value of 0 means using the platform's default stack size
or the size specified by the THREAD_STACK_SIZE macro. */
/* Used in Python/thread.c. */
size_t pythread_stacksize;
PyObject *codec_search_path;
PyObject *codec_search_cache;
PyObject *codec_error_registry;
......@@ -185,9 +199,6 @@ typedef struct _ts {
#endif
#ifndef Py_LIMITED_API
PyAPI_FUNC(void) _PyInterpreterState_Init(void);
#endif /* !Py_LIMITED_API */
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_New(void);
PyAPI_FUNC(void) PyInterpreterState_Clear(PyInterpreterState *);
PyAPI_FUNC(void) PyInterpreterState_Delete(PyInterpreterState *);
......@@ -246,7 +257,7 @@ PyAPI_FUNC(int) PyThreadState_SetAsyncExc(unsigned long, PyObject *);
/* Assuming the current thread holds the GIL, this is the
PyThreadState for the current thread. */
#ifdef Py_BUILD_CORE
PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
# define _PyThreadState_Current _PyRuntime.gilstate.tstate_current
# define PyThreadState_GET() \
((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current))
#else
......@@ -301,10 +312,6 @@ PyAPI_FUNC(void) PyGILState_Release(PyGILState_STATE);
PyAPI_FUNC(PyThreadState *) PyGILState_GetThisThreadState(void);
#ifndef Py_LIMITED_API
/* Issue #26558: Flag to disable PyGILState_Check().
If set to non-zero, PyGILState_Check() always returns 1. */
PyAPI_DATA(int) _PyGILState_check_enabled;
/* Helper/diagnostic function - return 1 if the current thread
currently holds the GIL, 0 otherwise.
......@@ -340,11 +347,6 @@ PyAPI_FUNC(PyThreadState *) PyThreadState_Next(PyThreadState *);
typedef struct _frame *(*PyThreadFrameGetter)(PyThreadState *self_);
#endif
/* hook for PyEval_GetFrame(), requested for Psyco */
#ifndef Py_LIMITED_API
PyAPI_DATA(PyThreadFrameGetter) _PyThreadState_GetFrame;
#endif
#ifdef __cplusplus
}
#endif
......
......@@ -987,6 +987,13 @@ PYTHON_HEADERS= \
pyconfig.h \
$(PARSER_HEADERS) \
$(srcdir)/Include/Python-ast.h \
$(srcdir)/Include/internal/_Python.h \
$(srcdir)/Include/internal/_ceval.h \
$(srcdir)/Include/internal/_gil.h \
$(srcdir)/Include/internal/_mem.h \
$(srcdir)/Include/internal/_pymalloc.h \
$(srcdir)/Include/internal/_pystate.h \
$(srcdir)/Include/internal/_warnings.h \
$(DTRACE_HEADERS)
$(LIBRARY_OBJS) $(MODOBJS) Programs/python.o: $(PYTHON_HEADERS)
......
Consolidate CPython's global runtime state under a single struct. This
improves discoverability of the runtime state.
......@@ -279,7 +279,7 @@ _enter_buffered_busy(buffered *self)
"reentrant call inside %R", self);
return 0;
}
relax_locking = (_Py_Finalizing != NULL);
relax_locking = _Py_IS_FINALIZING();
Py_BEGIN_ALLOW_THREADS
if (!relax_locking)
st = PyThread_acquire_lock(self->lock, 1);
......
......@@ -14,7 +14,6 @@
#include "pythread.h"
static PyObject *ThreadError;
static long nb_threads = 0;
static PyObject *str_dict;
_Py_IDENTIFIER(stderr);
......@@ -993,7 +992,7 @@ t_bootstrap(void *boot_raw)
tstate->thread_id = PyThread_get_thread_ident();
_PyThreadState_Init(tstate);
PyEval_AcquireThread(tstate);
nb_threads++;
tstate->interp->num_threads++;
res = PyObject_Call(boot->func, boot->args, boot->keyw);
if (res == NULL) {
if (PyErr_ExceptionMatches(PyExc_SystemExit))
......@@ -1020,7 +1019,7 @@ t_bootstrap(void *boot_raw)
Py_DECREF(boot->args);
Py_XDECREF(boot->keyw);
PyMem_DEL(boot_raw);
nb_threads--;
tstate->interp->num_threads--;
PyThreadState_Clear(tstate);
PyThreadState_DeleteCurrent();
PyThread_exit_thread();
......@@ -1159,7 +1158,8 @@ A thread's identity may be reused for another thread after it exits.");
static PyObject *
thread__count(PyObject *self)
{
return PyLong_FromLong(nb_threads);
PyThreadState *tstate = PyThreadState_Get();
return PyLong_FromLong(tstate->interp->num_threads);
}
PyDoc_STRVAR(_count_doc,
......@@ -1352,6 +1352,7 @@ PyInit__thread(void)
PyObject *m, *d, *v;
double time_max;
double timeout_max;
PyThreadState *tstate = PyThreadState_Get();
/* Initialize types: */
if (PyType_Ready(&localdummytype) < 0)
......@@ -1396,7 +1397,7 @@ PyInit__thread(void)
if (PyModule_AddObject(m, "_local", (PyObject *)&localtype) < 0)
return NULL;
nb_threads = 0;
tstate->interp->num_threads = 0;
str_dict = PyUnicode_InternFromString("__dict__");
if (str_dict == NULL)
......
......@@ -114,7 +114,7 @@ overlapped_dealloc(OverlappedObject *self)
{
/* The operation is no longer pending -- nothing to do. */
}
else if (_Py_Finalizing == NULL)
else if (!_Py_IS_FINALIZING())
{
/* The operation is still pending -- give a warning. This
will probably only happen on Windows XP. */
......
......@@ -598,16 +598,10 @@ Py_Main(int argc, wchar_t **argv)
}
}
char *pymalloc = Py_GETENV("PYTHONMALLOC");
if (_PyMem_SetupAllocators(pymalloc) < 0) {
fprintf(stderr,
"Error in PYTHONMALLOC: unknown allocator \"%s\"!\n", pymalloc);
exit(1);
}
/* Initialize the core language runtime */
Py_IgnoreEnvironmentFlag = core_config.ignore_environment;
core_config._disable_importlib = 0;
core_config.allocator = Py_GETENV("PYTHONMALLOC");
_Py_InitializeCore(&core_config);
/* Reprocess the command line with the language runtime available */
......
......@@ -2028,14 +2028,6 @@ finally:
/* Trashcan support. */
/* Current call-stack depth of tp_dealloc calls. */
int _PyTrash_delete_nesting = 0;
/* List of objects that still need to be cleaned up, singly linked via their
* gc headers' gc_prev pointers.
*/
PyObject *_PyTrash_delete_later = NULL;
/* Add op to the _PyTrash_delete_later list. Called when the current
* call-stack depth gets large. op must be a currently untracked gc'ed
* object, with refcount 0. Py_DECREF must already have been called on it.
......@@ -2046,8 +2038,8 @@ _PyTrash_deposit_object(PyObject *op)
assert(PyObject_IS_GC(op));
assert(_PyGC_REFS(op) == _PyGC_REFS_UNTRACKED);
assert(op->ob_refcnt == 0);
_Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *)_PyTrash_delete_later;
_PyTrash_delete_later = op;
_Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *)_PyRuntime.gc.trash_delete_later;
_PyRuntime.gc.trash_delete_later = op;
}
/* The equivalent API, using per-thread state recursion info */
......@@ -2068,11 +2060,11 @@ _PyTrash_thread_deposit_object(PyObject *op)
void
_PyTrash_destroy_chain(void)
{
while (_PyTrash_delete_later) {
PyObject *op = _PyTrash_delete_later;
while (_PyRuntime.gc.trash_delete_later) {
PyObject *op = _PyRuntime.gc.trash_delete_later;
destructor dealloc = Py_TYPE(op)->tp_dealloc;
_PyTrash_delete_later =
_PyRuntime.gc.trash_delete_later =
(PyObject*) _Py_AS_GC(op)->gc.gc_prev;
/* Call the deallocator directly. This used to try to
......@@ -2082,9 +2074,9 @@ _PyTrash_destroy_chain(void)
* up distorting allocation statistics.
*/
assert(op->ob_refcnt == 0);
++_PyTrash_delete_nesting;
++_PyRuntime.gc.trash_delete_nesting;
(*dealloc)(op);
--_PyTrash_delete_nesting;
--_PyRuntime.gc.trash_delete_nesting;
}
}
......
......@@ -1115,6 +1115,7 @@ frozenset_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
}
/* The empty frozenset is a singleton */
if (emptyfrozenset == NULL)
/* There is a possible (relatively harmless) race here. */
emptyfrozenset = make_new_set(type, NULL);
Py_XINCREF(emptyfrozenset);
return emptyfrozenset;
......
......@@ -1157,10 +1157,10 @@ subtype_dealloc(PyObject *self)
/* UnTrack and re-Track around the trashcan macro, alas */
/* See explanation at end of function for full disclosure */
PyObject_GC_UnTrack(self);
++_PyTrash_delete_nesting;
++_PyRuntime.gc.trash_delete_nesting;
++ tstate->trash_delete_nesting;
Py_TRASHCAN_SAFE_BEGIN(self);
--_PyTrash_delete_nesting;
--_PyRuntime.gc.trash_delete_nesting;
-- tstate->trash_delete_nesting;
/* Find the nearest base with a different tp_dealloc */
......@@ -1254,10 +1254,10 @@ subtype_dealloc(PyObject *self)
Py_DECREF(type);
endlabel:
++_PyTrash_delete_nesting;
++_PyRuntime.gc.trash_delete_nesting;
++ tstate->trash_delete_nesting;
Py_TRASHCAN_SAFE_END(self);
--_PyTrash_delete_nesting;
--_PyRuntime.gc.trash_delete_nesting;
-- tstate->trash_delete_nesting;
/* Explanation of the weirdness around the trashcan macros:
......@@ -1297,7 +1297,7 @@ subtype_dealloc(PyObject *self)
a subtle disaster.
Q. Why the bizarre (net-zero) manipulation of
_PyTrash_delete_nesting around the trashcan macros?
_PyRuntime.gc.trash_delete_nesting around the trashcan macros?
A. Some base classes (e.g. list) also use the trashcan mechanism.
The following scenario used to be possible:
......
......@@ -106,6 +106,14 @@
<ClInclude Include="..\Include\graminit.h" />
<ClInclude Include="..\Include\grammar.h" />
<ClInclude Include="..\Include\import.h" />
<ClInclude Include="..\Include\internal\_Python.h" />
<ClInclude Include="..\Include\internal\_ceval.h" />
<ClInclude Include="..\Include\internal\_condvar.h" />
<ClInclude Include="..\Include\internal\_gil.h" />
<ClInclude Include="..\Include\internal\_mem.h" />
<ClInclude Include="..\Include\internal\_pymalloc.h" />
<ClInclude Include="..\Include\internal\_pystate.h" />
<ClInclude Include="..\Include\internal\_warnings.h" />
<ClInclude Include="..\Include\intrcheck.h" />
<ClInclude Include="..\Include\iterobject.h" />
<ClInclude Include="..\Include\listobject.h" />
......
......@@ -129,6 +129,30 @@
<ClInclude Include="..\Include\import.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_Python.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_ceval.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_condvar.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_gil.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_mem.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_pymalloc.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_pystate.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\internal\_warnings.h">
<Filter>Include</Filter>
</ClInclude>
<ClInclude Include="..\Include\intrcheck.h">
<Filter>Include</Filter>
</ClInclude>
......
......@@ -21,10 +21,12 @@
#include "node.h"
#include "parsetok.h"
#include "pgen.h"
#include "internal/_mem.h"
int Py_DebugFlag;
int Py_VerboseFlag;
int Py_IgnoreEnvironmentFlag;
struct pyruntimestate _PyRuntime = {};
/* Forward */
grammar *getgrammar(const char *filename);
......@@ -61,6 +63,8 @@ main(int argc, char **argv)
filename = argv[1];
graminit_h = argv[2];
graminit_c = argv[3];
_PyObject_Initialize(&_PyRuntime.obj);
_PyMem_Initialize(&_PyRuntime.mem);
g = getgrammar(filename);
fp = fopen(graminit_c, "w");
if (fp == NULL) {
......
......@@ -8,13 +8,6 @@ PyDoc_STRVAR(warnings__doc__,
MODULE_NAME " provides basic warning filtering support.\n"
"It is a helper module to speed up interpreter start-up.");
/* Both 'filters' and 'onceregistry' can be set in warnings.py;
get_warnings_attr() will reset these variables accordingly. */
static PyObject *_filters; /* List */
static PyObject *_once_registry; /* Dict */
static PyObject *_default_action; /* String */
static long _filters_version;
_Py_IDENTIFIER(argv);
_Py_IDENTIFIER(stderr);
......@@ -53,7 +46,7 @@ get_warnings_attr(const char *attr, int try_import)
}
/* don't try to import after the start of the Python finalization */
if (try_import && _Py_Finalizing == NULL) {
if (try_import && !_Py_IS_FINALIZING()) {
warnings_module = PyImport_Import(warnings_str);
if (warnings_module == NULL) {
/* Fallback to the C implementation if we cannot get
......@@ -90,10 +83,10 @@ get_once_registry(void)
if (registry == NULL) {
if (PyErr_Occurred())
return NULL;
return _once_registry;
return _PyRuntime.warnings.once_registry;
}
Py_DECREF(_once_registry);
_once_registry = registry;
Py_DECREF(_PyRuntime.warnings.once_registry);
_PyRuntime.warnings.once_registry = registry;
return registry;
}
......@@ -108,11 +101,11 @@ get_default_action(void)
if (PyErr_Occurred()) {
return NULL;
}
return _default_action;
return _PyRuntime.warnings.default_action;
}
Py_DECREF(_default_action);
_default_action = default_action;
Py_DECREF(_PyRuntime.warnings.default_action);
_PyRuntime.warnings.default_action = default_action;
return default_action;
}
......@@ -132,23 +125,24 @@ get_filter(PyObject *category, PyObject *text, Py_ssize_t lineno,
return NULL;
}
else {
Py_DECREF(_filters);
_filters = warnings_filters;
Py_DECREF(_PyRuntime.warnings.filters);
_PyRuntime.warnings.filters = warnings_filters;
}
if (_filters == NULL || !PyList_Check(_filters)) {
PyObject *filters = _PyRuntime.warnings.filters;
if (filters == NULL || !PyList_Check(filters)) {
PyErr_SetString(PyExc_ValueError,
MODULE_NAME ".filters must be a list");
return NULL;
}
/* _filters could change while we are iterating over it. */
for (i = 0; i < PyList_GET_SIZE(_filters); i++) {
/* _PyRuntime.warnings.filters could change while we are iterating over it. */
for (i = 0; i < PyList_GET_SIZE(filters); i++) {
PyObject *tmp_item, *action, *msg, *cat, *mod, *ln_obj;
Py_ssize_t ln;
int is_subclass, good_msg, good_mod;
tmp_item = PyList_GET_ITEM(_filters, i);
tmp_item = PyList_GET_ITEM(filters, i);
if (!PyTuple_Check(tmp_item) || PyTuple_GET_SIZE(tmp_item) != 5) {
PyErr_Format(PyExc_ValueError,
MODULE_NAME ".filters item %zd isn't a 5-tuple", i);
......@@ -220,9 +214,9 @@ already_warned(PyObject *registry, PyObject *key, int should_set)
version_obj = _PyDict_GetItemId(registry, &PyId_version);
if (version_obj == NULL
|| !PyLong_CheckExact(version_obj)
|| PyLong_AsLong(version_obj) != _filters_version) {
|| PyLong_AsLong(version_obj) != _PyRuntime.warnings.filters_version) {
PyDict_Clear(registry);
version_obj = PyLong_FromLong(_filters_version);
version_obj = PyLong_FromLong(_PyRuntime.warnings.filters_version);
if (version_obj == NULL)
return -1;
if (_PyDict_SetItemId(registry, &PyId_version, version_obj) < 0) {
......@@ -520,7 +514,7 @@ warn_explicit(PyObject *category, PyObject *message,
if (registry == NULL)
goto cleanup;
}
/* _once_registry[(text, category)] = 1 */
/* _PyRuntime.warnings.once_registry[(text, category)] = 1 */
rc = update_registry(registry, text, category, 0);
}
else if (_PyUnicode_EqualToASCIIString(action, "module")) {
......@@ -910,7 +904,7 @@ warnings_warn_explicit(PyObject *self, PyObject *args, PyObject *kwds)
static PyObject *
warnings_filters_mutated(PyObject *self, PyObject *args)
{
_filters_version++;
_PyRuntime.warnings.filters_version++;
Py_RETURN_NONE;
}
......@@ -1160,7 +1154,8 @@ create_filter(PyObject *category, const char *action)
}
/* This assumes the line number is zero for now. */
return PyTuple_Pack(5, action_obj, Py_None, category, Py_None, _PyLong_Zero);
return PyTuple_Pack(5, action_obj, Py_None,
category, Py_None, _PyLong_Zero);
}
static PyObject *
......@@ -1228,33 +1223,35 @@ _PyWarnings_Init(void)
if (m == NULL)
return NULL;
if (_filters == NULL) {
_filters = init_filters();
if (_filters == NULL)
if (_PyRuntime.warnings.filters == NULL) {
_PyRuntime.warnings.filters = init_filters();
if (_PyRuntime.warnings.filters == NULL)
return NULL;
}
Py_INCREF(_filters);
if (PyModule_AddObject(m, "filters", _filters) < 0)
Py_INCREF(_PyRuntime.warnings.filters);
if (PyModule_AddObject(m, "filters", _PyRuntime.warnings.filters) < 0)
return NULL;
if (_once_registry == NULL) {
_once_registry = PyDict_New();
if (_once_registry == NULL)
if (_PyRuntime.warnings.once_registry == NULL) {
_PyRuntime.warnings.once_registry = PyDict_New();
if (_PyRuntime.warnings.once_registry == NULL)
return NULL;
}
Py_INCREF(_once_registry);
if (PyModule_AddObject(m, "_onceregistry", _once_registry) < 0)
Py_INCREF(_PyRuntime.warnings.once_registry);
if (PyModule_AddObject(m, "_onceregistry",
_PyRuntime.warnings.once_registry) < 0)
return NULL;
if (_default_action == NULL) {
_default_action = PyUnicode_FromString("default");
if (_default_action == NULL)
if (_PyRuntime.warnings.default_action == NULL) {
_PyRuntime.warnings.default_action = PyUnicode_FromString("default");
if (_PyRuntime.warnings.default_action == NULL)
return NULL;
}
Py_INCREF(_default_action);
if (PyModule_AddObject(m, "_defaultaction", _default_action) < 0)
Py_INCREF(_PyRuntime.warnings.default_action);
if (PyModule_AddObject(m, "_defaultaction",
_PyRuntime.warnings.default_action) < 0)
return NULL;
_filters_version = 0;
_PyRuntime.warnings.filters_version = 0;
return m;
}
......@@ -37,27 +37,16 @@
* Condition Variable.
*/
#ifndef _CONDVAR_H_
#define _CONDVAR_H_
#ifndef _CONDVAR_IMPL_H_
#define _CONDVAR_IMPL_H_
#include "Python.h"
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
   is not present in unistd.h. But they can still be implemented as an external
library (e.g. gnu pth in pthread emulation) */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#include "internal/_condvar.h"
#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR
#include <pthread.h>
#define PyCOND_ADD_MICROSECONDS(tv, interval) \
do { /* TODO: add overflow and truncation checks */ \
......@@ -74,13 +63,11 @@ do { /* TODO: add overflow and truncation checks */ \
#endif
/* The following functions return 0 on success, nonzero on error */
#define PyMUTEX_T pthread_mutex_t
#define PyMUTEX_INIT(mut) pthread_mutex_init((mut), NULL)
#define PyMUTEX_FINI(mut) pthread_mutex_destroy(mut)
#define PyMUTEX_LOCK(mut) pthread_mutex_lock(mut)
#define PyMUTEX_UNLOCK(mut) pthread_mutex_unlock(mut)
#define PyCOND_T pthread_cond_t
#define PyCOND_INIT(cond) pthread_cond_init((cond), NULL)
#define PyCOND_FINI(cond) pthread_cond_destroy(cond)
#define PyCOND_SIGNAL(cond) pthread_cond_signal(cond)
......@@ -116,45 +103,11 @@ PyCOND_TIMEDWAIT(PyCOND_T *cond, PyMUTEX_T *mut, long long us)
* Emulated condition variables that work with XP and later, plus
* example native support on VISTA and onwards.
*/
#define Py_HAVE_CONDVAR
/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif
/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
#if _PY_EMULATED_WIN_CV
/* The mutex is a CriticalSection object and the condition variable
is emulated with the help of a semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event, because although
an auto-reset event might appear to solve the lost-wakeup bug (race
condition between releasing the outer lock and waiting) because it
maintains state even though a wait hasn't happened, there is still
a lost wakeup problem if more than one thread is interrupted in the
critical place. A semaphore solves that, because its state is counted,
not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
This implementation still has the problem that the threads woken
with a "signal" aren't necessarily those that are already
......@@ -168,8 +121,6 @@ PyCOND_TIMEDWAIT(PyCOND_T *cond, PyMUTEX_T *mut, long long us)
http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
*/
typedef CRITICAL_SECTION PyMUTEX_T;
Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
......@@ -198,15 +149,6 @@ PyMUTEX_UNLOCK(PyMUTEX_T *cs)
return 0;
}
/* The ConditionVariable object. From XP onwards it is easily emulated with
* a Semaphore
*/
typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;
Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
......@@ -304,12 +246,7 @@ PyCOND_BROADCAST(PyCOND_T *cv)
return 0;
}
#else
/* Use native Win7 primitives if build target is Win7 or higher */
/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;
#else /* !_PY_EMULATED_WIN_CV */
Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
......@@ -339,8 +276,6 @@ PyMUTEX_UNLOCK(PyMUTEX_T *cs)
}
typedef CONDITION_VARIABLE PyCOND_T;
Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
......@@ -387,4 +322,4 @@ PyCOND_BROADCAST(PyCOND_T *cv)
#endif /* _POSIX_THREADS, NT_THREADS */
#endif /* _CONDVAR_H_ */
#endif /* _CONDVAR_IMPL_H_ */
......@@ -77,6 +77,30 @@ extern void _PyGILState_Init(PyInterpreterState *, PyThreadState *);
extern void _PyGILState_Fini(void);
#endif /* WITH_THREAD */
_PyRuntimeState _PyRuntime = {};
void
_PyRuntime_Initialize(void)
{
/* XXX We only initialize once in the process, which aligns with
the static initialization of the former globals now found in
_PyRuntime. However, _PyRuntime *should* be initialized with
every Py_Initialize() call, but doing so breaks the runtime.
This is because the runtime state is not properly finalized
currently. */
static int initialized = 0;
if (initialized)
return;
initialized = 1;
_PyRuntimeState_Init(&_PyRuntime);
}
void
_PyRuntime_Finalize(void)
{
_PyRuntimeState_Fini(&_PyRuntime);
}
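A hedged sketch of the lifecycle these two entry points are meant to bracket, per the XXX note above (both are invoked internally: _PyRuntime_Initialize from _Py_InitializeCore below, _PyRuntime_Finalize from Py_FinalizeEx):

    /* Sketch of the call order, error handling elided: */
    _PyRuntime_Initialize();   /* idempotent for now, per the XXX note */
    /* ... _Py_InitializeCore(), run Python code, Py_FinalizeEx() ... */
    _PyRuntime_Finalize();     /* tears down _PyRuntime state */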
/* Global configuration variable declarations are in pydebug.h */
/* XXX (ncoghlan): move those declarations to pylifecycle.h? */
int Py_DebugFlag; /* Needed by parser.c */
......@@ -100,8 +124,6 @@ int Py_LegacyWindowsFSEncodingFlag = 0; /* Uses mbcs instead of utf-8 */
int Py_LegacyWindowsStdioFlag = 0; /* Uses FileIO instead of WindowsConsoleIO */
#endif
PyThreadState *_Py_Finalizing = NULL;
/* Hack to force loading of object files */
int (*_PyOS_mystrnicmp_hack)(const char *, const char *, Py_ssize_t) = \
PyOS_mystrnicmp; /* Python/pystrcmp.o */
......@@ -119,19 +141,17 @@ PyModule_GetWarningsModule(void)
*
* Can be called prior to Py_Initialize.
*/
int _Py_CoreInitialized = 0;
int _Py_Initialized = 0;
int
_Py_IsCoreInitialized(void)
{
return _Py_CoreInitialized;
return _PyRuntime.core_initialized;
}
int
Py_IsInitialized(void)
{
return _Py_Initialized;
return _PyRuntime.initialized;
}
/* Helper to allow an embedding application to override the normal
......@@ -544,14 +564,16 @@ void _Py_InitializeCore(const _PyCoreConfig *config)
_PyCoreConfig core_config = _PyCoreConfig_INIT;
_PyMainInterpreterConfig preinit_config = _PyMainInterpreterConfig_INIT;
_PyRuntime_Initialize();
if (config != NULL) {
core_config = *config;
}
if (_Py_Initialized) {
if (_PyRuntime.initialized) {
Py_FatalError("Py_InitializeCore: main interpreter already initialized");
}
if (_Py_CoreInitialized) {
if (_PyRuntime.core_initialized) {
Py_FatalError("Py_InitializeCore: runtime core already initialized");
}
......@@ -564,7 +586,14 @@ void _Py_InitializeCore(const _PyCoreConfig *config)
* threads still hanging around from a previous Py_Initialize/Finalize
* pair :(
*/
_Py_Finalizing = NULL;
_PyRuntime.finalizing = NULL;
if (_PyMem_SetupAllocators(core_config.allocator) < 0) {
fprintf(stderr,
"Error in PYTHONMALLOC: unknown allocator \"%s\"!\n",
core_config.allocator);
exit(1);
}
#ifdef __ANDROID__
/* Passing "" to setlocale() on Android requests the C locale rather
......@@ -606,7 +635,7 @@ void _Py_InitializeCore(const _PyCoreConfig *config)
Py_HashRandomizationFlag = 1;
}
_PyInterpreterState_Init();
_PyInterpreterState_Enable(&_PyRuntime);
interp = PyInterpreterState_New();
if (interp == NULL)
Py_FatalError("Py_InitializeCore: can't make main interpreter");
......@@ -698,7 +727,7 @@ void _Py_InitializeCore(const _PyCoreConfig *config)
}
/* Only when we get here is the runtime core fully initialized */
_Py_CoreInitialized = 1;
_PyRuntime.core_initialized = 1;
}
/* Read configuration settings from standard locations
......@@ -739,10 +768,10 @@ int _Py_InitializeMainInterpreter(const _PyMainInterpreterConfig *config)
PyInterpreterState *interp;
PyThreadState *tstate;
if (!_Py_CoreInitialized) {
if (!_PyRuntime.core_initialized) {
Py_FatalError("Py_InitializeMainInterpreter: runtime core not initialized");
}
if (_Py_Initialized) {
if (_PyRuntime.initialized) {
Py_FatalError("Py_InitializeMainInterpreter: main interpreter already initialized");
}
......@@ -763,7 +792,7 @@ int _Py_InitializeMainInterpreter(const _PyMainInterpreterConfig *config)
* This means anything which needs support from extension modules
* or pure Python code in the standard library won't work.
*/
_Py_Initialized = 1;
_PyRuntime.initialized = 1;
return 0;
}
/* TODO: Report exceptions rather than fatal errors below here */
......@@ -808,7 +837,7 @@ int _Py_InitializeMainInterpreter(const _PyMainInterpreterConfig *config)
Py_XDECREF(warnings_module);
}
_Py_Initialized = 1;
_PyRuntime.initialized = 1;
if (!Py_NoSiteFlag)
initsite(); /* Module site */
......@@ -924,7 +953,7 @@ Py_FinalizeEx(void)
PyThreadState *tstate;
int status = 0;
if (!_Py_Initialized)
if (!_PyRuntime.initialized)
return status;
wait_for_thread_shutdown();
......@@ -946,9 +975,9 @@ Py_FinalizeEx(void)
/* Remaining threads (e.g. daemon threads) will automatically exit
after taking the GIL (in PyEval_RestoreThread()). */
_Py_Finalizing = tstate;
_Py_Initialized = 0;
_Py_CoreInitialized = 0;
_PyRuntime.finalizing = tstate;
_PyRuntime.initialized = 0;
_PyRuntime.core_initialized = 0;
/* Flush sys.stdout and sys.stderr */
if (flush_std_files() < 0) {
......@@ -1110,6 +1139,7 @@ Py_FinalizeEx(void)
#endif
call_ll_exitfuncs();
_PyRuntime_Finalize();
return status;
}
......@@ -1139,7 +1169,7 @@ Py_NewInterpreter(void)
PyThreadState *tstate, *save_tstate;
PyObject *bimod, *sysmod;
if (!_Py_Initialized)
if (!_PyRuntime.initialized)
Py_FatalError("Py_NewInterpreter: call Py_Initialize first");
#ifdef WITH_THREAD
......@@ -1854,20 +1884,19 @@ exit:
# include "pythread.h"
#endif
static void (*pyexitfunc)(void) = NULL;
/* For the atexit module. */
void _Py_PyAtExit(void (*func)(void))
{
pyexitfunc = func;
_PyRuntime.pyexitfunc = func;
}
static void
call_py_exitfuncs(void)
{
if (pyexitfunc == NULL)
if (_PyRuntime.pyexitfunc == NULL)
return;
(*pyexitfunc)();
(*_PyRuntime.pyexitfunc)();
PyErr_Clear();
}
......@@ -1900,22 +1929,19 @@ wait_for_thread_shutdown(void)
}
#define NEXITFUNCS 32
static void (*exitfuncs[NEXITFUNCS])(void);
static int nexitfuncs = 0;
int Py_AtExit(void (*func)(void))
{
if (nexitfuncs >= NEXITFUNCS)
if (_PyRuntime.nexitfuncs >= NEXITFUNCS)
return -1;
exitfuncs[nexitfuncs++] = func;
_PyRuntime.exitfuncs[_PyRuntime.nexitfuncs++] = func;
return 0;
}
static void
call_ll_exitfuncs(void)
{
while (nexitfuncs > 0)
(*exitfuncs[--nexitfuncs])();
while (_PyRuntime.nexitfuncs > 0)
(*_PyRuntime.exitfuncs[--_PyRuntime.nexitfuncs])();
fflush(stdout);
fflush(stderr);
......
......@@ -519,8 +519,6 @@ Return the profiling function set with sys.setprofile.\n\
See the profiler chapter in the library manual."
);
static int _check_interval = 100;
static PyObject *
sys_setcheckinterval(PyObject *self, PyObject *args)
{
......@@ -529,7 +527,8 @@ sys_setcheckinterval(PyObject *self, PyObject *args)
"are deprecated. Use sys.setswitchinterval() "
"instead.", 1) < 0)
return NULL;
if (!PyArg_ParseTuple(args, "i:setcheckinterval", &_check_interval))
PyInterpreterState *interp = PyThreadState_GET()->interp;
if (!PyArg_ParseTuple(args, "i:setcheckinterval", &interp->check_interval))
return NULL;
Py_RETURN_NONE;
}
......@@ -549,7 +548,8 @@ sys_getcheckinterval(PyObject *self, PyObject *args)
"are deprecated. Use sys.getswitchinterval() "
"instead.", 1) < 0)
return NULL;
return PyLong_FromLong(_check_interval);
PyInterpreterState *interp = PyThreadState_GET()->interp;
return PyLong_FromLong(interp->check_interval);
}
PyDoc_STRVAR(getcheckinterval_doc,
......@@ -1339,7 +1339,7 @@ Clear the internal type lookup cache.");
static PyObject *
sys_is_finalizing(PyObject* self, PyObject* args)
{
return PyBool_FromLong(_Py_Finalizing != NULL);
return PyBool_FromLong(_Py_IS_FINALIZING());
}
PyDoc_STRVAR(is_finalizing_doc,
......@@ -1479,11 +1479,24 @@ list_builtin_module_names(void)
return list;
}
static PyObject *warnoptions = NULL;
static PyObject *
get_warnoptions(void)
{
PyObject *warnoptions = PyThreadState_GET()->interp->warnoptions;
if (warnoptions == NULL || !PyList_Check(warnoptions)) {
Py_XDECREF(warnoptions);
warnoptions = PyList_New(0);
if (warnoptions == NULL)
return NULL;
PyThreadState_GET()->interp->warnoptions = warnoptions;
}
return warnoptions;
}
void
PySys_ResetWarnOptions(void)
{
PyObject *warnoptions = PyThreadState_GET()->interp->warnoptions;
if (warnoptions == NULL || !PyList_Check(warnoptions))
return;
PyList_SetSlice(warnoptions, 0, PyList_GET_SIZE(warnoptions), NULL);
......@@ -1492,12 +1505,9 @@ PySys_ResetWarnOptions(void)
void
PySys_AddWarnOptionUnicode(PyObject *unicode)
{
if (warnoptions == NULL || !PyList_Check(warnoptions)) {
Py_XDECREF(warnoptions);
warnoptions = PyList_New(0);
if (warnoptions == NULL)
return;
}
PyObject *warnoptions = get_warnoptions();
if (warnoptions == NULL)
return;
PyList_Append(warnoptions, unicode);
}
......@@ -1515,17 +1525,20 @@ PySys_AddWarnOption(const wchar_t *s)
int
PySys_HasWarnOptions(void)
{
PyObject *warnoptions = PyThreadState_GET()->interp->warnoptions;
return (warnoptions != NULL && (PyList_Size(warnoptions) > 0)) ? 1 : 0;
}
static PyObject *xoptions = NULL;
static PyObject *
get_xoptions(void)
{
PyObject *xoptions = PyThreadState_GET()->interp->xoptions;
if (xoptions == NULL || !PyDict_Check(xoptions)) {
Py_XDECREF(xoptions);
xoptions = PyDict_New();
if (xoptions == NULL)
return NULL;
PyThreadState_GET()->interp->xoptions = xoptions;
}
return xoptions;
}
......@@ -2130,17 +2143,15 @@ _PySys_EndInit(PyObject *sysdict)
SET_SYS_FROM_STRING_INT_RESULT("base_exec_prefix",
PyUnicode_FromWideChar(Py_GetExecPrefix(), -1));
if (warnoptions == NULL) {
warnoptions = PyList_New(0);
if (warnoptions == NULL)
return -1;
}
SET_SYS_FROM_STRING_INT_RESULT("warnoptions",
PyList_GetSlice(warnoptions,
0, Py_SIZE(warnoptions)));
PyObject *warnoptions = get_warnoptions();
if (warnoptions == NULL)
return -1;
SET_SYS_FROM_STRING_BORROW_INT_RESULT("warnoptions", warnoptions);
SET_SYS_FROM_STRING_BORROW_INT_RESULT("_xoptions", get_xoptions());
PyObject *xoptions = get_xoptions();
if (xoptions == NULL)
return -1;
SET_SYS_FROM_STRING_BORROW_INT_RESULT("_xoptions", xoptions);
if (PyErr_Occurred())
return -1;
......
......@@ -76,11 +76,6 @@ PyThread_init_thread(void)
PyThread__init_thread();
}
/* Support for runtime thread stack size tuning.
A value of 0 means using the platform's default stack size
or the size specified by the THREAD_STACK_SIZE macro. */
static size_t _pythread_stacksize = 0;
#if defined(_POSIX_THREADS)
# define PYTHREAD_NAME "pthread"
# include "thread_pthread.h"
......@@ -96,7 +91,7 @@ static size_t _pythread_stacksize = 0;
size_t
PyThread_get_stacksize(void)
{
return _pythread_stacksize;
return PyThreadState_GET()->interp->pythread_stacksize;
}
/* Only platforms defining a THREAD_SET_STACKSIZE() macro
......
......@@ -189,9 +189,10 @@ PyThread_start_new_thread(void (*func)(void *), void *arg)
return PYTHREAD_INVALID_THREAD_ID;
obj->func = func;
obj->arg = arg;
PyThreadState *tstate = PyThreadState_GET();
size_t stacksize = tstate ? tstate->interp->pythread_stacksize : 0;
hThread = (HANDLE)_beginthreadex(0,
Py_SAFE_DOWNCAST(_pythread_stacksize,
Py_ssize_t, unsigned int),
Py_SAFE_DOWNCAST(stacksize, Py_ssize_t, unsigned int),
bootstrap, obj,
0, &threadID);
if (hThread == 0) {
......@@ -332,13 +333,13 @@ _pythread_nt_set_stacksize(size_t size)
{
/* set to default */
if (size == 0) {
_pythread_stacksize = 0;
PyThreadState_GET()->interp->pythread_stacksize = 0;
return 0;
}
/* valid range? */
if (size >= THREAD_MIN_STACKSIZE && size < THREAD_MAX_STACKSIZE) {
_pythread_stacksize = size;
PyThreadState_GET()->interp->pythread_stacksize = size;
return 0;
}
......
......@@ -205,8 +205,9 @@ PyThread_start_new_thread(void (*func)(void *), void *arg)
return PYTHREAD_INVALID_THREAD_ID;
#endif
#if defined(THREAD_STACK_SIZE)
tss = (_pythread_stacksize != 0) ? _pythread_stacksize
: THREAD_STACK_SIZE;
PyThreadState *tstate = PyThreadState_GET();
size_t stacksize = tstate ? tstate->interp->pythread_stacksize : 0;
tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE;
if (tss != 0) {
if (pthread_attr_setstacksize(&attrs, tss) != 0) {
pthread_attr_destroy(&attrs);
......@@ -578,7 +579,7 @@ _pythread_pthread_set_stacksize(size_t size)
/* set to default */
if (size == 0) {
_pythread_stacksize = 0;
PyThreadState_GET()->interp->pythread_stacksize = 0;
return 0;
}
......@@ -595,7 +596,7 @@ _pythread_pthread_set_stacksize(size_t size)
rc = pthread_attr_setstacksize(&attrs, size);
pthread_attr_destroy(&attrs);
if (rc == 0) {
_pythread_stacksize = size;
PyThreadState_GET()->interp->pythread_stacksize = size;
return 0;
}
}
......
#######################################
# C Globals and CPython Runtime State.
CPython's C code makes extensive use of global variables. Each global
falls into one of several categories:
* (effectively) constants (incl. static types)
* globals used exclusively in main or in the REPL
* freelists, caches, and counters
* process-global state
* module state
* Python runtime state
The ignored-globals.txt file is organized similarly. Of the different
categories, the last two are problematic and generally should not exist
in the codebase.
Globals that hold module state (i.e. in Modules/*.c) cause problems
when multiple interpreters are in use. For more info, see PEP 3121,
which addresses the situation for extension modules in general.
Globals in the last category should be avoided as well. The problem
isn't with the Python runtime having state. Rather, the problem is with
that state being spread throughout the codebase in dozens of individual
globals. Unlike the other globals, the runtime state represents a set
of values that are constantly shifting in a complex way. When they are
spread out it's harder to get a clear picture of what the runtime
involves. Furthermore, when they are spread out it complicates efforts
to change the runtime.
Consequently, the globals for Python's runtime state have been
consolidated under a single top-level _PyRuntime global. No new globals
should be added for runtime state. Instead, they should be added to
_PyRuntimeState or one of its sub-structs. The check-c-globals script
should be run to ensure that no new globals have been added:
python3 Tools/c-globals/check-c-globals.py
If it reports any globals then they should be resolved. If the globals
are runtime state then they should be folded into _PyRuntimeState.
Otherwise they should be added to ignored-globals.txt.