Commit ef4ac967 authored by Eric Snow, committed by GitHub

bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (GH-11617)

This involves moving the global "pending calls" state to PyInterpreterState.

https://bugs.python.org/issue33608
parent 463572c8
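At a glance, the change replaces the single runtime-wide pending-calls queue with one queue per interpreter, exposed through two new internal functions (declared in the internal-header hunk below). A sketch of the new API surface; the parameter names are annotations added here for readability, not from the commit:

    /* Schedule func(arg) to run later in the given interpreter.
       Per _pop_pending_call() below, a thread_id of 0 appears to mean
       "any thread of that interpreter may service the call". */
    PyAPI_FUNC(int) _Py_AddPendingCall(struct _is *interp,
                                       unsigned long thread_id,
                                       int (*func)(void *),
                                       void *arg);
    /* Drain that interpreter's queue; must hold the GIL. */
    PyAPI_FUNC(int) _Py_MakePendingCalls(struct _is *interp);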
@@ -221,7 +221,7 @@ PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc);
 #ifndef Py_LIMITED_API
 PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
 PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
-PyAPI_FUNC(void) _PyEval_SignalAsyncExc(void);
+PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyInterpreterState *);
 #endif

 /* Masks and values used by FORMAT_VALUE opcode. */
@@ -11,8 +11,12 @@ extern "C" {
 #include "pycore_atomic.h"
 #include "pythread.h"

+struct _is; // See PyInterpreterState in cpython/pystate.h.
+
+PyAPI_FUNC(int) _Py_AddPendingCall(struct _is*, unsigned long, int (*)(void *), void *);
+PyAPI_FUNC(int) _Py_MakePendingCalls(struct _is*);
+
 struct _pending_calls {
-    unsigned long main_thread;
     PyThread_type_lock lock;
     /* Request for running pending calls. */
     _Py_atomic_int calls_to_do;
@@ -22,6 +26,7 @@ struct _pending_calls {
     int async_exc;
 #define NPENDINGCALLS 32
     struct {
+        unsigned long thread_id;
         int (*func)(void *);
         void *arg;
     } calls[NPENDINGCALLS];
@@ -29,6 +34,13 @@ struct _pending_calls {
     int last;
 };

+struct _ceval_interpreter_state {
+    /* This single variable consolidates all requests to break out of
+       the fast path in the eval loop. */
+    _Py_atomic_int eval_breaker;
+    struct _pending_calls pending;
+};
+
 #include "pycore_gil.h"

 struct _ceval_runtime_state {
@@ -39,12 +51,8 @@ struct _ceval_runtime_state {
        c_tracefunc.  This speeds up the if statement in
        PyEval_EvalFrameEx() after fast_next_opcode. */
     int tracing_possible;
-    /* This single variable consolidates all requests to break out of
-       the fast path in the eval loop. */
-    _Py_atomic_int eval_breaker;
     /* Request for dropping the GIL */
     _Py_atomic_int gil_drop_request;
-    struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;

     struct _gil_runtime_state gil;
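The _pending_calls struct above is a fixed-size ring buffer indexed by the first and last cursors. A minimal, self-contained sketch of its invariants (illustrative only, not code from the commit):

    #define NPENDINGCALLS 32   /* matches the header above */

    /* Both cursors start at 0. "Empty" is first == last, and "full"
       deliberately leaves one slot unused so the two states are
       distinguishable -- at most 31 calls can be queued at once. */
    static int first = 0, last = 0;

    static int queue_empty(void) { return first == last; }
    static int queue_full(void)  { return (last + 1) % NPENDINGCALLS == first; }

    static int queue_push(void)  /* mirrors _add_pending_call() below */
    {
        if (queue_full()) {
            return -1;
        }
        last = (last + 1) % NPENDINGCALLS;
        return 0;
    }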
@@ -11,6 +11,7 @@ extern "C" {
 #include "pystate.h"
 #include "pythread.h"

+#include "pycore_atomic.h"
 #include "pycore_ceval.h"
 #include "pycore_pathconfig.h"
 #include "pycore_pymem.h"
@@ -31,6 +32,8 @@ struct _is {
     int64_t id_refcount;
     PyThread_type_lock id_mutex;

+    int finalizing;
+
     PyObject *modules;
     PyObject *modules_by_index;
     PyObject *sysdict;
@@ -78,6 +81,8 @@ struct _is {
     PyObject *pyexitmodule;

     uint64_t tstate_next_unique_id;
+
+    struct _ceval_interpreter_state ceval;
 };

 PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
@@ -207,6 +212,8 @@ typedef struct pyruntimestate {
         struct _xidregitem *head;
     } xidregistry;

+    unsigned long main_thread;
+
 #define NEXITFUNCS 32
     void (*exitfuncs[NEXITFUNCS])(void);
     int nexitfuncs;
We added a new internal _Py_AddPendingCall() that operates relative to the
provided interpreter. This allows us to use the existing implementation to
ask another interpreter to do work that cannot be done in the current
interpreter, such as decref'ing an object owned by the other interpreter.
The existing Py_AddPendingCall() only operates relative to the main
interpreter.
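For example, a minimal sketch of such a cross-interpreter decref using the new internal API (the helpers _decref_callback and schedule_decref are hypothetical; only _Py_AddPendingCall() comes from this change):

    #include "Python.h"
    #include "internal/pycore_pystate.h"  /* _Py_AddPendingCall, PyInterpreterState */

    /* Runs later, inside the interpreter that owns the object. */
    static int
    _decref_callback(void *arg)
    {
        Py_DECREF((PyObject *)arg);
        return 0;
    }

    /* Ask `interp` to drop a reference it owns. Returns 0 on success,
       -1 if the queue is full or the interpreter is finalizing.
       Passing 0 for thread_id lets any thread of that interpreter
       service the call (see _pop_pending_call() in the ceval hunk). */
    static int
    schedule_decref(PyInterpreterState *interp, PyObject *obj)
    {
        return _Py_AddPendingCall(interp, 0, _decref_callback, obj);
    }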
@@ -2445,6 +2445,7 @@ pending_threadfunc(PyObject *self, PyObject *arg)
     Py_INCREF(callable);

     Py_BEGIN_ALLOW_THREADS
+    /* XXX Use the internal _Py_AddPendingCall(). */
     r = Py_AddPendingCall(&_pending_callback, callable);
     Py_END_ALLOW_THREADS
@@ -19,6 +19,7 @@
 #include <process.h>
 #endif
 #endif
+#include "internal/pycore_pystate.h"

 #ifdef HAVE_SIGNAL_H
 #include <signal.h>
@@ -295,8 +296,10 @@ trip_signal(int sig_num)
             {
                 /* Py_AddPendingCall() isn't signal-safe, but we
                    still use it for this exceptional case. */
-                Py_AddPendingCall(report_wakeup_send_error,
-                                  (void *)(intptr_t) last_error);
+                _Py_AddPendingCall(_PyRuntime.interpreters.main,
+                                   main_thread,
+                                   report_wakeup_send_error,
+                                   (void *)(intptr_t) last_error);
             }
         }
     }
@@ -313,8 +316,10 @@ trip_signal(int sig_num)
             {
                 /* Py_AddPendingCall() isn't signal-safe, but we
                    still use it for this exceptional case. */
-                Py_AddPendingCall(report_wakeup_write_error,
-                                  (void *)(intptr_t)errno);
+                _Py_AddPendingCall(_PyRuntime.interpreters.main,
+                                   main_thread,
+                                   report_wakeup_write_error,
+                                   (void *)(intptr_t)errno);
             }
         }
     }
@@ -96,61 +96,61 @@ static long dxp[256];
 /* This can set eval_breaker to 0 even though gil_drop_request became
    1.  We believe this is all right because the eval loop will release
    the GIL eventually anyway. */
-#define COMPUTE_EVAL_BREAKER() \
+#define COMPUTE_EVAL_BREAKER(interp) \
     _Py_atomic_store_relaxed( \
-        &_PyRuntime.ceval.eval_breaker, \
+        &interp->ceval.eval_breaker, \
         GIL_REQUEST | \
         _Py_atomic_load_relaxed(&_PyRuntime.ceval.signals_pending) | \
-        _Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
-        _PyRuntime.ceval.pending.async_exc)
+        _Py_atomic_load_relaxed(&interp->ceval.pending.calls_to_do) | \
+        interp->ceval.pending.async_exc)

-#define SET_GIL_DROP_REQUEST() \
+#define SET_GIL_DROP_REQUEST(interp) \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
     } while (0)

-#define RESET_GIL_DROP_REQUEST() \
+#define RESET_GIL_DROP_REQUEST(interp) \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
-        COMPUTE_EVAL_BREAKER(); \
+        COMPUTE_EVAL_BREAKER(interp); \
     } while (0)

 /* Pending calls are only modified under pending_lock */
-#define SIGNAL_PENDING_CALLS() \
+#define SIGNAL_PENDING_CALLS(interp) \
     do { \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&interp->ceval.pending.calls_to_do, 1); \
+        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
     } while (0)

-#define UNSIGNAL_PENDING_CALLS() \
+#define UNSIGNAL_PENDING_CALLS(interp) \
     do { \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
-        COMPUTE_EVAL_BREAKER(); \
+        _Py_atomic_store_relaxed(&interp->ceval.pending.calls_to_do, 0); \
+        COMPUTE_EVAL_BREAKER(interp); \
     } while (0)

 #define SIGNAL_PENDING_SIGNALS() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 1); \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&_PyRuntime.interpreters.main->ceval.eval_breaker, 1); \
     } while (0)

 #define UNSIGNAL_PENDING_SIGNALS() \
     do { \
         _Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 0); \
-        COMPUTE_EVAL_BREAKER(); \
+        COMPUTE_EVAL_BREAKER(_PyRuntime.interpreters.main); \
     } while (0)

-#define SIGNAL_ASYNC_EXC() \
+#define SIGNAL_ASYNC_EXC(interp) \
     do { \
-        _PyRuntime.ceval.pending.async_exc = 1; \
-        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
+        interp->ceval.pending.async_exc = 1; \
+        _Py_atomic_store_relaxed(&interp->ceval.eval_breaker, 1); \
     } while (0)

-#define UNSIGNAL_ASYNC_EXC() \
+#define UNSIGNAL_ASYNC_EXC(interp) \
     do { \
-        _PyRuntime.ceval.pending.async_exc = 0; \
-        COMPUTE_EVAL_BREAKER(); \
+        interp->ceval.pending.async_exc = 0; \
+        COMPUTE_EVAL_BREAKER(interp); \
     } while (0)
@@ -174,9 +174,6 @@ PyEval_InitThreads(void)
     PyThread_init_thread();
     create_gil();
     take_gil(_PyThreadState_GET());
-    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
-    if (!_PyRuntime.ceval.pending.lock)
-        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
 }

 void
@@ -243,9 +240,11 @@ PyEval_ReInitThreads(void)
     if (!gil_created())
         return;
     recreate_gil();
-    _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
+    // This will be reset in make_pending_calls() below.
+    current_tstate->interp->ceval.pending.lock = NULL;
     take_gil(current_tstate);
-    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
+    _PyRuntime.main_thread = PyThread_get_thread_ident();

     /* Destroy all threads except the current one */
     _PyThreadState_DeleteExcept(current_tstate);
@@ -255,9 +254,9 @@ PyEval_ReInitThreads(void)
    raised. */

 void
-_PyEval_SignalAsyncExc(void)
+_PyEval_SignalAsyncExc(PyInterpreterState *interp)
 {
-    SIGNAL_ASYNC_EXC();
+    SIGNAL_ASYNC_EXC(interp);
 }

 PyThreadState *
@@ -323,17 +322,58 @@ _PyEval_SignalReceived(void)
     SIGNAL_PENDING_SIGNALS();
 }

+static int
+_add_pending_call(PyInterpreterState *interp, unsigned long thread_id, int (*func)(void *), void *arg)
+{
+    int i = interp->ceval.pending.last;
+    int j = (i + 1) % NPENDINGCALLS;
+    if (j == interp->ceval.pending.first) {
+        return -1; /* Queue full */
+    }
+    interp->ceval.pending.calls[i].thread_id = thread_id;
+    interp->ceval.pending.calls[i].func = func;
+    interp->ceval.pending.calls[i].arg = arg;
+    interp->ceval.pending.last = j;
+    return 0;
+}
+
+/* pop one item off the queue while holding the lock */
+static void
+_pop_pending_call(PyInterpreterState *interp, int (**func)(void *), void **arg)
+{
+    int i = interp->ceval.pending.first;
+    if (i == interp->ceval.pending.last) {
+        return; /* Queue empty */
+    }
+
+    *func = interp->ceval.pending.calls[i].func;
+    *arg = interp->ceval.pending.calls[i].arg;
+    interp->ceval.pending.first = (i + 1) % NPENDINGCALLS;
+
+    unsigned long thread_id = interp->ceval.pending.calls[i].thread_id;
+    if (thread_id && PyThread_get_thread_ident() != thread_id) {
+        // Thread mismatch, so move it to the end of the list
+        // and start over.
+        _Py_AddPendingCall(interp, thread_id, *func, *arg);
+        return;
+    }
+}
+
+int
+Py_AddPendingCall(int (*func)(void *), void *arg)
+{
+    PyInterpreterState *interp = _PyRuntime.interpreters.main;
+    return _Py_AddPendingCall(interp, _PyRuntime.main_thread, func, arg);
+}
+
 /* This implementation is thread-safe.  It allows
    scheduling to be made from any thread, and even from an executing
    callback.
  */

 int
-Py_AddPendingCall(int (*func)(void *), void *arg)
+_Py_AddPendingCall(PyInterpreterState *interp, unsigned long thread_id, int (*func)(void *), void *arg)
 {
-    int i, j, result=0;
-    PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;
-
     /* try a few times for the lock.  Since this mechanism is used
      * for signal handling (on the main thread), there is a (slim)
      * chance that a signal is delivered on the same thread while we
@@ -345,7 +385,9 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
      * We also check for lock being NULL, in the unlikely case that
      * this function is called before any bytecode evaluation takes place.
      */
+    PyThread_type_lock lock = interp->ceval.pending.lock;
     if (lock != NULL) {
+        int i;
         for (i = 0; i<100; i++) {
             if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                 break;
@@ -354,17 +396,21 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
             return -1;
     }

-    i = _PyRuntime.ceval.pending.last;
-    j = (i + 1) % NPENDINGCALLS;
-    if (j == _PyRuntime.ceval.pending.first) {
-        result = -1; /* Queue full */
-    } else {
-        _PyRuntime.ceval.pending.calls[i].func = func;
-        _PyRuntime.ceval.pending.calls[i].arg = arg;
-        _PyRuntime.ceval.pending.last = j;
+    int result = -1;
+    if (interp->finalizing) {
+        PyObject *exc, *val, *tb;
+        PyErr_Fetch(&exc, &val, &tb);
+        PyErr_SetString(PyExc_SystemError, "Py_AddPendingCall: cannot add pending calls (interpreter shutting down)");
+        PyErr_Print();
+        PyErr_Restore(exc, val, tb);
+        goto done;
     }
+    result = _add_pending_call(interp, thread_id, func, arg);

     /* signal main loop */
-    SIGNAL_PENDING_CALLS();
+    SIGNAL_PENDING_CALLS(interp);
+
+done:
     if (lock != NULL)
         PyThread_release_lock(lock);
     return result;
@@ -374,9 +420,7 @@ static int
 handle_signals(void)
 {
     /* Only handle signals on main thread. */
-    if (_PyRuntime.ceval.pending.main_thread &&
-        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
-    {
+    if (PyThread_get_thread_ident() != _PyRuntime.main_thread) {
         return 0;
     }
     /*
@@ -396,17 +440,10 @@ handle_signals(void)
 }

 static int
-make_pending_calls(void)
+make_pending_calls(PyInterpreterState *interp)
 {
     static int busy = 0;

-    /* only service pending calls on main thread */
-    if (_PyRuntime.ceval.pending.main_thread &&
-        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
-    {
-        return 0;
-    }
-
     /* don't perform recursive pending calls */
     if (busy) {
         return 0;
@@ -414,13 +451,13 @@ make_pending_calls(void)
     busy = 1;
     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
-    UNSIGNAL_PENDING_CALLS();
+    UNSIGNAL_PENDING_CALLS(interp);
     int res = 0;

-    if (!_PyRuntime.ceval.pending.lock) {
+    if (!interp->ceval.pending.lock) {
         /* initial allocation of the lock */
-        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
-        if (_PyRuntime.ceval.pending.lock == NULL) {
+        interp->ceval.pending.lock = PyThread_allocate_lock();
+        if (interp->ceval.pending.lock == NULL) {
             res = -1;
             goto error;
         }
@@ -428,24 +465,18 @@ make_pending_calls(void)
     /* perform a bounded number of calls, in case of recursion */
     for (int i=0; i<NPENDINGCALLS; i++) {
-        int j;
-        int (*func)(void *);
+        int (*func)(void *) = NULL;
         void *arg = NULL;

         /* pop one item off the queue while holding the lock */
-        PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
-        j = _PyRuntime.ceval.pending.first;
-        if (j == _PyRuntime.ceval.pending.last) {
-            func = NULL; /* Queue empty */
-        } else {
-            func = _PyRuntime.ceval.pending.calls[j].func;
-            arg = _PyRuntime.ceval.pending.calls[j].arg;
-            _PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
-        }
-        PyThread_release_lock(_PyRuntime.ceval.pending.lock);
+        PyThread_acquire_lock(interp->ceval.pending.lock, WAIT_LOCK);
+        _pop_pending_call(interp, &func, &arg);
+        PyThread_release_lock(interp->ceval.pending.lock);

         /* having released the lock, perform the callback */
-        if (func == NULL)
+        if (func == NULL) {
             break;
+        }
         res = func(arg);
         if (res) {
             goto error;
@@ -457,10 +488,18 @@ make_pending_calls(void)

 error:
     busy = 0;
-    SIGNAL_PENDING_CALLS();
+    SIGNAL_PENDING_CALLS(interp); /* We're not done yet */
     return res;
 }
+int
+_Py_MakePendingCalls(PyInterpreterState *interp)
+{
+    assert(PyGILState_Check());
+
+    return make_pending_calls(interp);
+}
+
 /* Py_MakePendingCalls() is a simple wrapper for the sake
    of backward-compatibility. */
 int
@@ -475,12 +514,8 @@ Py_MakePendingCalls(void)
         return res;
     }

-    res = make_pending_calls();
-    if (res != 0) {
-        return res;
-    }
-
-    return 0;
+    PyInterpreterState *interp = _PyRuntime.interpreters.main;
+    return make_pending_calls(interp);
 }
 /* The interpreter's recursion limit */
@@ -687,7 +722,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 #define DISPATCH() \
     { \
-        if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
+        if (!_Py_atomic_load_relaxed(&tstate->interp->ceval.eval_breaker)) { \
             FAST_DISPATCH(); \
         } \
         continue; \
@@ -989,7 +1024,7 @@ main_loop:
            async I/O handler); see Py_AddPendingCall() and
            Py_MakePendingCalls() above. */

-        if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
+        if (_Py_atomic_load_relaxed(&(tstate->interp->ceval.eval_breaker))) {
             opcode = _Py_OPCODE(*next_instr);
             if (opcode == SETUP_FINALLY ||
                 opcode == SETUP_WITH ||
@@ -1022,9 +1057,9 @@ main_loop:
                 }
             }
             if (_Py_atomic_load_relaxed(
-                        &_PyRuntime.ceval.pending.calls_to_do))
+                        &(tstate->interp->ceval.pending.calls_to_do)))
             {
-                if (make_pending_calls() != 0) {
+                if (_Py_MakePendingCalls(tstate->interp) != 0) {
                     goto error;
                 }
             }
@@ -1056,7 +1091,7 @@ main_loop:
             if (tstate->async_exc != NULL) {
                 PyObject *exc = tstate->async_exc;
                 tstate->async_exc = NULL;
-                UNSIGNAL_ASYNC_EXC();
+                UNSIGNAL_ASYNC_EXC(tstate->interp);
                 PyErr_SetNone(exc);
                 Py_DECREF(exc);
                 goto error;
@@ -176,7 +176,7 @@ static void drop_gil(PyThreadState *tstate)
             &_PyRuntime.ceval.gil.last_holder)
         ) == tstate)
     {
-        RESET_GIL_DROP_REQUEST();
+        RESET_GIL_DROP_REQUEST(tstate->interp);
         /* NOTE: if COND_WAIT does not atomically start waiting when
            releasing the mutex, another thread can run through, take
            the GIL and drop it again, and reset the condition
@@ -213,7 +213,7 @@ static void take_gil(PyThreadState *tstate)
         if (timed_out &&
             _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil.locked) &&
             _PyRuntime.ceval.gil.switch_number == saved_switchnum) {
-            SET_GIL_DROP_REQUEST();
+            SET_GIL_DROP_REQUEST(tstate->interp);
         }
     }
 _ready:
@@ -239,10 +239,10 @@ _ready:
     MUTEX_UNLOCK(_PyRuntime.ceval.gil.switch_mutex);
 #endif
     if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST();
+        RESET_GIL_DROP_REQUEST(tstate->interp);
     }
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc();
+        _PyEval_SignalAsyncExc(tstate->interp);
    }

     MUTEX_UNLOCK(_PyRuntime.ceval.gil.mutex);
@@ -1459,8 +1459,32 @@ Py_EndInterpreter(PyThreadState *tstate)
     if (tstate->frame != NULL)
         Py_FatalError("Py_EndInterpreter: thread still has a frame");

+    // Mark as finalizing.
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_acquire_lock(interp->ceval.pending.lock, 1);
+    }
+    interp->finalizing = 1;
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_release_lock(interp->ceval.pending.lock);
+    }
+
+    // Wrap up existing threads.
     wait_for_thread_shutdown();

+    // Make any pending calls.
+    if (_Py_atomic_load_relaxed(
+                &(interp->ceval.pending.calls_to_do)))
+    {
+        // XXX Ensure that the interpreter is running in the current thread?
+        if (_Py_MakePendingCalls(interp) < 0) {
+            PyObject *exc, *val, *tb;
+            PyErr_Fetch(&exc, &val, &tb);
+            PyErr_BadInternalCall();
+            _PyErr_ChainExceptions(exc, val, tb);
+            PyErr_Print();
+        }
+    }
+
     call_py_exitfuncs(interp);

     if (tstate != interp->tstate_head || tstate->next != NULL)
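Condensing the hunk above: shutdown first flips interp->finalizing under the pending-calls lock (so _Py_AddPendingCall() refuses new work), then lets threads wind down, then drains whatever is already queued. A hedged restatement as a standalone helper (the function name is hypothetical; the commit inlines this logic in Py_EndInterpreter()):

    static void
    finish_pending_calls(PyInterpreterState *interp)
    {
        /* Block new pending calls; _Py_AddPendingCall() checks
           interp->finalizing under this same lock. */
        if (interp->ceval.pending.lock != NULL) {
            PyThread_acquire_lock(interp->ceval.pending.lock, WAIT_LOCK);
        }
        interp->finalizing = 1;
        if (interp->ceval.pending.lock != NULL) {
            PyThread_release_lock(interp->ceval.pending.lock);
        }

        /* Drain anything already queued (e.g. cross-interpreter
           decrefs) before the interpreter's state is torn down. */
        if (_Py_atomic_load_relaxed(&interp->ceval.pending.calls_to_do)) {
            (void)_Py_MakePendingCalls(interp);
        }
    }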
@@ -132,28 +132,19 @@ PyInterpreterState_New(void)
         return NULL;
     }

+    memset(interp, 0, sizeof(*interp));
     interp->id_refcount = -1;
-    interp->id_mutex = NULL;
-    interp->modules = NULL;
-    interp->modules_by_index = NULL;
-    interp->sysdict = NULL;
-    interp->builtins = NULL;
-    interp->builtins_copy = NULL;
-    interp->tstate_head = NULL;
     interp->check_interval = 100;
-    interp->num_threads = 0;
-    interp->pythread_stacksize = 0;
-    interp->codec_search_path = NULL;
-    interp->codec_search_cache = NULL;
-    interp->codec_error_registry = NULL;
-    interp->codecs_initialized = 0;
-    interp->fscodec_initialized = 0;
+
+    interp->ceval.pending.lock = PyThread_allocate_lock();
+    if (interp->ceval.pending.lock == NULL) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "failed to create interpreter ceval pending mutex");
+        return NULL;
+    }
     interp->core_config = _PyCoreConfig_INIT;
     interp->config = _PyMainInterpreterConfig_INIT;
-    interp->importlib = NULL;
-    interp->import_func = NULL;
     interp->eval_frame = _PyEval_EvalFrameDefault;
-    interp->co_extra_user_count = 0;
 #ifdef HAVE_DLOPEN
 #if HAVE_DECL_RTLD_NOW
     interp->dlopenflags = RTLD_NOW;
@@ -161,13 +152,10 @@ PyInterpreterState_New(void)
     interp->dlopenflags = RTLD_LAZY;
 #endif
 #endif
-#ifdef HAVE_FORK
-    interp->before_forkers = NULL;
-    interp->after_forkers_parent = NULL;
-    interp->after_forkers_child = NULL;
-#endif
-    interp->pyexitfunc = NULL;
-    interp->pyexitmodule = NULL;
+
+    if (_PyRuntime.main_thread == 0) {
+        _PyRuntime.main_thread = PyThread_get_thread_ident();
+    }

     HEAD_LOCK();
     if (_PyRuntime.interpreters.next_id < 0) {
@@ -222,6 +210,9 @@ PyInterpreterState_Clear(PyInterpreterState *interp)
     Py_CLEAR(interp->after_forkers_parent);
     Py_CLEAR(interp->after_forkers_child);
 #endif
+
+    // XXX Once we have one allocator per interpreter (i.e.
+    // per-interpreter GC) we must ensure that all of the interpreter's
+    // objects have been cleaned up at the point.
 }
@@ -262,6 +253,9 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
     if (interp->id_mutex != NULL) {
         PyThread_free_lock(interp->id_mutex);
     }
+    if (interp->ceval.pending.lock != NULL) {
+        PyThread_free_lock(interp->ceval.pending.lock);
+    }
     PyMem_RawFree(interp);
 }
@@ -871,7 +865,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
             p->async_exc = exc;
             HEAD_UNLOCK();
             Py_XDECREF(old_exc);
-            _PyEval_SignalAsyncExc();
+            _PyEval_SignalAsyncExc(interp);
             return 1;
         }
     }
@@ -1338,6 +1332,7 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
     }

     // "Release" the data and/or the object.
+    // XXX Use _Py_AddPendingCall().
     _call_in_interpreter(interp, _release_xidata, data);
 }
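The XXX above hints at replacing the synchronous _call_in_interpreter() with the new pending-call machinery. A speculative sketch of that direction (entirely hypothetical, not part of this commit; _release_xidata_pending is an invented wrapper):

    /* Hypothetical: schedule the release in the owning interpreter
       instead of temporarily switching thread states. */
    static int
    _release_xidata_pending(void *arg)
    {
        _release_xidata(arg);
        return 0;
    }

    /* ... then, in _PyCrossInterpreterData_Release():
       _Py_AddPendingCall(interp, 0, _release_xidata_pending, data); */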