Commit 4d61e6e3 authored by Victor Stinner, committed by GitHub

Revert: bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (GH-11617) (GH-12159)

* Revert "bpo-36097: Use only public C-API in the_xxsubinterpreters module (adding as necessary). (#12003)"

This reverts commit bcfa450f.

* Revert "bpo-33608: Simplify ceval's DISPATCH by hoisting eval_breaker ahead of time. (gh-12062)"

This reverts commit bda918bf.

* Revert "bpo-33608: Use _Py_AddPendingCall() in _PyCrossInterpreterData_Release(). (gh-12024)"

This reverts commit b05b711a.

* Revert "bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (GH-11617)"

This reverts commit ef4ac967.
parent f4b0a1c0
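For context, the net effect of this revert is that callers go back to the long-standing public pending-call API rather than the private per-interpreter one. A minimal, hedged sketch of how that public API is used from an extension (not part of this commit; the callback and helper names are hypothetical, error handling trimmed):

#include <Python.h>

/* Sketch only: the public pending-call API that this revert returns to.
 * A pending callback must return 0 on success; it runs later in the main
 * thread, between bytecode instructions. */
static int
my_pending_callback(void *arg)     /* hypothetical callback name */
{
    printf("pending call ran: %s\n", (const char *)arg);
    return 0;
}

static void
schedule_pending_call(void)        /* hypothetical helper name */
{
    /* Returns 0 if queued, -1 if the (32-entry) queue is full. */
    if (Py_AddPendingCall(my_pending_callback, (void *)"hello") < 0) {
        /* Queue full; a real caller would retry later. */
    }
}

Normally the eval loop drains the queue on its own; calling Py_MakePendingCalls() by hand is only needed in unusual embedding scenarios.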
@@ -221,7 +221,7 @@ PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc);
 #ifndef Py_LIMITED_API
 PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
 PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
-PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyInterpreterState *);
+PyAPI_FUNC(void) _PyEval_SignalAsyncExc(void);
 #endif
 /* Masks and values used by FORMAT_VALUE opcode. */
...
#ifndef Py_CPYTHON_INTERPRETERIDOBJECT_H
# error "this header file must not be included directly"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Interpreter ID Object */
PyAPI_DATA(PyTypeObject) _PyInterpreterID_Type;
PyAPI_FUNC(PyObject *) _PyInterpreterID_New(int64_t);
PyAPI_FUNC(PyObject *) _PyInterpreterState_GetIDObject(PyInterpreterState *);
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterID_LookUp(PyObject *);
PyAPI_FUNC(int64_t) _Py_CoerceID(PyObject *);
#ifdef __cplusplus
}
#endif
@@ -30,13 +30,9 @@ typedef struct {
     (_PyMainInterpreterConfig){.install_signal_handlers = -1}
 /* Note: _PyMainInterpreterConfig_INIT sets other fields to 0/NULL */
-PyAPI_FUNC(int) _PyInterpreterState_RequiresIDRef(PyInterpreterState *);
-PyAPI_FUNC(void) _PyInterpreterState_RequireIDRef(PyInterpreterState *, int);
 PyAPI_FUNC(_PyCoreConfig *) _PyInterpreterState_GetCoreConfig(PyInterpreterState *);
 PyAPI_FUNC(_PyMainInterpreterConfig *) _PyInterpreterState_GetMainConfig(PyInterpreterState *);
-PyAPI_FUNC(PyObject *) _PyInterpreterState_GetMainModule(PyInterpreterState *);
 /* State unique per thread */
@@ -218,65 +214,6 @@ PyAPI_FUNC(PyThreadState *) PyThreadState_Next(PyThreadState *);
 typedef struct _frame *(*PyThreadFrameGetter)(PyThreadState *self_);
/* cross-interpreter data */
struct _xid;
// _PyCrossInterpreterData is similar to Py_buffer as an effectively
// opaque struct that holds data outside the object machinery. This
// is necessary to pass safely between interpreters in the same process.
typedef struct _xid {
// data is the cross-interpreter-safe derivation of a Python object
// (see _PyObject_GetCrossInterpreterData). It will be NULL if the
// new_object func (below) encodes the data.
void *data;
// obj is the Python object from which the data was derived. This
// is non-NULL only if the data remains bound to the object in some
// way, such that the object must be "released" (via a decref) when
// the data is released. In that case the code that sets the field,
// likely a registered "crossinterpdatafunc", is responsible for
// ensuring it owns the reference (i.e. incref).
PyObject *obj;
// interp is the ID of the owning interpreter of the original
// object. It corresponds to the active interpreter when
// _PyObject_GetCrossInterpreterData() was called. This should only
// be set by the cross-interpreter machinery.
//
// We use the ID rather than the PyInterpreterState to avoid issues
// with deleted interpreters. Note that IDs are never re-used, so
// each one will always correspond to a specific interpreter
// (whether still alive or not).
int64_t interp;
// new_object is a function that returns a new object in the current
// interpreter given the data. The resulting object (a new
// reference) will be equivalent to the original object. This field
// is required.
PyObject *(*new_object)(struct _xid *);
// free is called when the data is released. If it is NULL then
// nothing will be done to free the data. For some types this is
// okay (e.g. bytes) and for those types this field should be set
// to NULL. However, for most the data was allocated just for
// cross-interpreter use, so it must be freed when
// _PyCrossInterpreterData_Release is called or the memory will
// leak. In that case, at the very least this field should be set
// to PyMem_RawFree (the default if not explicitly set to NULL).
// The call will happen with the original interpreter activated.
void (*free)(void *);
} _PyCrossInterpreterData;
PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *);
PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *);
PyAPI_FUNC(void) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *);
PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *);
/* cross-interpreter data registry */
typedef int (*crossinterpdatafunc)(PyObject *, struct _xid *);
PyAPI_FUNC(int) _PyCrossInterpreterData_RegisterClass(PyTypeObject *, crossinterpdatafunc);
PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *);
 #ifdef __cplusplus
 }
 #endif
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
     atomic_thread_fence(ORDER)
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
+    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
-    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
+    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
 /* Use builtin atomic operations in GCC >= 4.7 */
 #elif defined(HAVE_BUILTIN_ATOMIC)
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_RELEASE), \
-     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
+     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_ACQUIRE \
             || (ORDER) == __ATOMIC_CONSUME), \
-     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
+     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
 /* Only support GCC (for expression statements) and x86 (for simple
  * atomic semantics) and MSVC x86/x64/ARM */
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
 #endif
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+  if (sizeof(*ATOMIC_VAL._value) == 8) { \
+    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof((ATOMIC_VAL)->_value) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
+    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
+    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
+    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
+      _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
+      _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
       break; \
     default: \
-      _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
+      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
       break; \
     }
 #else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
+      _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
+      _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
     default: \
-      _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
+      _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
     }
@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
 #endif
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
-    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+  if (sizeof(*ATOMIC_VAL._value) == 8) { \
+    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof((ATOMIC_VAL)->_value) == 8 ? \
-    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
-    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
+    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
+    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
+    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
  )
 #endif
 #else  /* !gcc x86  !_msc_ver */
...
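The hunks above only change parenthesization, but they show the shape of the _Py_atomic_* wrappers: an atomic value wrapped in a struct, with store/load macros dispatching to the platform primitive. A standalone C11 sketch of the stdatomic path, independent of CPython's headers (names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the shape of _Py_atomic_int: the atomic value is wrapped in a
 * struct so the macros take a pointer to the wrapper. */
typedef struct { atomic_int _value; } my_atomic_int;

#define MY_ATOMIC_STORE(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
#define MY_ATOMIC_LOAD(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)

int main(void)
{
    my_atomic_int eval_breaker = { 0 };

    /* A writer thread requests a break out of the eval loop: */
    MY_ATOMIC_STORE(&eval_breaker, 1, memory_order_release);

    /* The eval loop polls the flag with a cheap load: */
    if (MY_ATOMIC_LOAD(&eval_breaker, memory_order_acquire)) {
        printf("break requested\n");
    }
    return 0;
}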
@@ -11,12 +11,8 @@ extern "C" {
 #include "pycore_atomic.h"
 #include "pythread.h"
-struct _is; // See PyInterpreterState in cpython/pystate.h.
-PyAPI_FUNC(int) _Py_AddPendingCall(struct _is*, unsigned long, int (*)(void *), void *);
-PyAPI_FUNC(int) _Py_MakePendingCalls(struct _is*);
 struct _pending_calls {
-    unsigned long main_thread;
     PyThread_type_lock lock;
     /* Request for running pending calls. */
     _Py_atomic_int calls_to_do;
@@ -26,7 +22,6 @@ struct _pending_calls {
     int async_exc;
 #define NPENDINGCALLS 32
     struct {
-        unsigned long thread_id;
         int (*func)(void *);
         void *arg;
     } calls[NPENDINGCALLS];
@@ -34,13 +29,6 @@ struct _pending_calls {
     int last;
 };
-struct _ceval_interpreter_state {
-    /* This single variable consolidates all requests to break out of
-       the fast path in the eval loop. */
-    _Py_atomic_int eval_breaker;
-    struct _pending_calls pending;
-};
 #include "pycore_gil.h"
 struct _ceval_runtime_state {
@@ -51,8 +39,12 @@ struct _ceval_runtime_state {
        c_tracefunc. This speeds up the if statement in
       PyEval_EvalFrameEx() after fast_next_opcode. */
     int tracing_possible;
+    /* This single variable consolidates all requests to break out of
+       the fast path in the eval loop. */
+    _Py_atomic_int eval_breaker;
     /* Request for dropping the GIL */
     _Py_atomic_int gil_drop_request;
+    struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;
     struct _gil_runtime_state gil;
...
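The struct _pending_calls shown above is a small fixed-size ring buffer: first/last advance modulo NPENDINGCALLS, calls_to_do flags the eval loop, and the lock serializes producers. A hedged, standalone sketch of just the queueing discipline (locking and atomics omitted; not CPython code):

#define NPENDINGCALLS 32   /* same capacity as the CPython struct */

struct pending_call { int (*func)(void *); void *arg; };

struct pending_queue {
    struct pending_call calls[NPENDINGCALLS];
    int first;   /* index of the oldest queued call */
    int last;    /* index one past the newest queued call */
};

/* Returns 0 on success, -1 if the queue is full (caller may retry). */
static int
queue_push(struct pending_queue *q, int (*func)(void *), void *arg)
{
    int j = q->last;
    int i = (j + 1) % NPENDINGCALLS;
    if (i == q->first) {
        return -1;          /* full: one slot is left empty to disambiguate */
    }
    q->calls[j].func = func;
    q->calls[j].arg = arg;
    q->last = i;
    return 0;
}

/* Pops the oldest call, or sets *func to NULL when the queue is empty. */
static void
queue_pop(struct pending_queue *q, int (**func)(void *), void **arg)
{
    int i = q->first;
    if (i == q->last) {
        *func = NULL;       /* empty */
        return;
    }
    *func = q->calls[i].func;
    *arg = q->calls[i].arg;
    q->first = (i + 1) % NPENDINGCALLS;
}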
@@ -11,7 +11,6 @@ extern "C" {
 #include "pystate.h"
 #include "pythread.h"
-#include "pycore_atomic.h"
 #include "pycore_ceval.h"
 #include "pycore_pathconfig.h"
 #include "pycore_pymem.h"
@@ -30,11 +29,8 @@ struct _is {
     int64_t id;
     int64_t id_refcount;
-    int requires_idref;
     PyThread_type_lock id_mutex;
-    int finalizing;
     PyObject *modules;
     PyObject *modules_by_index;
     PyObject *sysdict;
@@ -82,8 +78,6 @@ struct _is {
     PyObject *pyexitmodule;
     uint64_t tstate_next_unique_id;
-    struct _ceval_interpreter_state ceval;
 };
 PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
@@ -93,12 +87,66 @@ PyAPI_FUNC(void) _PyInterpreterState_IDIncref(struct _is *);
 PyAPI_FUNC(void) _PyInterpreterState_IDDecref(struct _is *);
/* cross-interpreter data */
struct _xid;
// _PyCrossInterpreterData is similar to Py_buffer as an effectively
// opaque struct that holds data outside the object machinery. This
// is necessary to pass safely between interpreters in the same process.
typedef struct _xid {
// data is the cross-interpreter-safe derivation of a Python object
// (see _PyObject_GetCrossInterpreterData). It will be NULL if the
// new_object func (below) encodes the data.
void *data;
// obj is the Python object from which the data was derived. This
// is non-NULL only if the data remains bound to the object in some
// way, such that the object must be "released" (via a decref) when
// the data is released. In that case the code that sets the field,
// likely a registered "crossinterpdatafunc", is responsible for
// ensuring it owns the reference (i.e. incref).
PyObject *obj;
// interp is the ID of the owning interpreter of the original
// object. It corresponds to the active interpreter when
// _PyObject_GetCrossInterpreterData() was called. This should only
// be set by the cross-interpreter machinery.
//
// We use the ID rather than the PyInterpreterState to avoid issues
// with deleted interpreters.
int64_t interp;
// new_object is a function that returns a new object in the current
// interpreter given the data. The resulting object (a new
// reference) will be equivalent to the original object. This field
// is required.
PyObject *(*new_object)(struct _xid *);
// free is called when the data is released. If it is NULL then
// nothing will be done to free the data. For some types this is
// okay (e.g. bytes) and for those types this field should be set
// to NULL. However, for most the data was allocated just for
// cross-interpreter use, so it must be freed when
// _PyCrossInterpreterData_Release is called or the memory will
// leak. In that case, at the very least this field should be set
// to PyMem_RawFree (the default if not explicitly set to NULL).
// The call will happen with the original interpreter activated.
void (*free)(void *);
} _PyCrossInterpreterData;
typedef int (*crossinterpdatafunc)(PyObject *, _PyCrossInterpreterData *);
PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *);
PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *);
PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *);
PyAPI_FUNC(void) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *);
 /* cross-interpreter data registry */
 /* For now we use a global registry of shareable classes.  An
    alternative would be to add a tp_* slot for a class's
    crossinterpdatafunc. It would be simpler and more efficient. */
+PyAPI_FUNC(int) _PyCrossInterpreterData_Register_Class(PyTypeObject *, crossinterpdatafunc);
+PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *);
 struct _xidregitem;
 struct _xidregitem {
@@ -159,8 +207,6 @@ typedef struct pyruntimestate {
     struct _xidregitem *head;
 } xidregistry;
-    unsigned long main_thread;
 #define NEXITFUNCS 32
     void (*exitfuncs[NEXITFUNCS])(void);
     int nexitfuncs;
...
#ifndef Py_INTERPRETERIDOBJECT_H
#define Py_INTERPRETERIDOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_LIMITED_API
# define Py_CPYTHON_INTERPRETERIDOBJECT_H
# include "cpython/interpreteridobject.h"
# undef Py_CPYTHON_INTERPRETERIDOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERPRETERIDOBJECT_H */
@@ -391,7 +391,6 @@ OBJECT_OBJS= \
 		Objects/floatobject.o \
 		Objects/frameobject.o \
 		Objects/funcobject.o \
-		Objects/interpreteridobject.o \
 		Objects/iterobject.o \
 		Objects/listobject.o \
 		Objects/longobject.o \
@@ -978,7 +977,6 @@ PYTHON_HEADERS= \
 		$(srcdir)/Include/funcobject.h \
 		$(srcdir)/Include/genobject.h \
 		$(srcdir)/Include/import.h \
-		$(srcdir)/Include/interpreteridobject.h \
 		$(srcdir)/Include/intrcheck.h \
 		$(srcdir)/Include/iterobject.h \
 		$(srcdir)/Include/listobject.h \
@@ -1041,7 +1039,6 @@ PYTHON_HEADERS= \
 		$(srcdir)/Include/cpython/abstract.h \
 		$(srcdir)/Include/cpython/coreconfig.h \
 		$(srcdir)/Include/cpython/dictobject.h \
-		$(srcdir)/Include/cpython/interpreteridobject.h \
 		$(srcdir)/Include/cpython/object.h \
 		$(srcdir)/Include/cpython/objimpl.h \
 		$(srcdir)/Include/cpython/pyerrors.h \
...
@@ -2445,7 +2445,6 @@ pending_threadfunc(PyObject *self, PyObject *arg)
     Py_INCREF(callable);
     Py_BEGIN_ALLOW_THREADS
-    /* XXX Use the internal _Py_AddPendingCall(). */
     r = Py_AddPendingCall(&_pending_callback, callable);
     Py_END_ALLOW_THREADS
...
@@ -19,7 +19,6 @@
 #include <process.h>
 #endif
 #endif
-#include "internal/pycore_pystate.h"
 #ifdef HAVE_SIGNAL_H
 #include <signal.h>
@@ -296,10 +295,8 @@ trip_signal(int sig_num)
         {
             /* Py_AddPendingCall() isn't signal-safe, but we
                still use it for this exceptional case. */
-            _Py_AddPendingCall(_PyRuntime.interpreters.main,
-                               main_thread,
-                               report_wakeup_send_error,
-                               (void *)(intptr_t) last_error);
+            Py_AddPendingCall(report_wakeup_send_error,
+                              (void *)(intptr_t) last_error);
         }
     }
 }
@@ -316,10 +313,8 @@ trip_signal(int sig_num)
         {
             /* Py_AddPendingCall() isn't signal-safe, but we
                still use it for this exceptional case. */
-            _Py_AddPendingCall(_PyRuntime.interpreters.main,
-                               main_thread,
-                               report_wakeup_write_error,
-                               (void *)(intptr_t)errno);
+            Py_AddPendingCall(report_wakeup_write_error,
+                              (void *)(intptr_t)errno);
         }
     }
 }
...
/* InterpreterID object */
#include "Python.h"
#include "internal/pycore_pystate.h"
#include "interpreteridobject.h"
int64_t
_Py_CoerceID(PyObject *orig)
{
PyObject *pyid = PyNumber_Long(orig);
if (pyid == NULL) {
if (PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Format(PyExc_TypeError,
"'id' must be a non-negative int, got %R", orig);
}
else {
PyErr_Format(PyExc_ValueError,
"'id' must be a non-negative int, got %R", orig);
}
return -1;
}
int64_t id = PyLong_AsLongLong(pyid);
Py_DECREF(pyid);
if (id == -1 && PyErr_Occurred() != NULL) {
if (!PyErr_ExceptionMatches(PyExc_OverflowError)) {
PyErr_Format(PyExc_ValueError,
"'id' must be a non-negative int, got %R", orig);
}
return -1;
}
if (id < 0) {
PyErr_Format(PyExc_ValueError,
"'id' must be a non-negative int, got %R", orig);
return -1;
}
return id;
}
typedef struct interpid {
PyObject_HEAD
int64_t id;
} interpid;
static interpid *
newinterpid(PyTypeObject *cls, int64_t id, int force)
{
PyInterpreterState *interp = _PyInterpreterState_LookUpID(id);
if (interp == NULL) {
if (force) {
PyErr_Clear();
}
else {
return NULL;
}
}
interpid *self = PyObject_New(interpid, cls);
if (self == NULL) {
return NULL;
}
self->id = id;
if (interp != NULL) {
_PyInterpreterState_IDIncref(interp);
}
return self;
}
static PyObject *
interpid_new(PyTypeObject *cls, PyObject *args, PyObject *kwds)
{
static char *kwlist[] = {"id", "force", NULL};
PyObject *idobj;
int force = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds,
"O|$p:InterpreterID.__init__", kwlist,
&idobj, &force)) {
return NULL;
}
// Coerce and check the ID.
int64_t id;
if (PyObject_TypeCheck(idobj, &_PyInterpreterID_Type)) {
id = ((interpid *)idobj)->id;
}
else {
id = _Py_CoerceID(idobj);
if (id < 0) {
return NULL;
}
}
return (PyObject *)newinterpid(cls, id, force);
}
static void
interpid_dealloc(PyObject *v)
{
int64_t id = ((interpid *)v)->id;
PyInterpreterState *interp = _PyInterpreterState_LookUpID(id);
if (interp != NULL) {
_PyInterpreterState_IDDecref(interp);
}
else {
// already deleted
PyErr_Clear();
}
Py_TYPE(v)->tp_free(v);
}
static PyObject *
interpid_repr(PyObject *self)
{
PyTypeObject *type = Py_TYPE(self);
const char *name = _PyType_Name(type);
interpid *id = (interpid *)self;
return PyUnicode_FromFormat("%s(%" PRId64 ")", name, id->id);
}
static PyObject *
interpid_str(PyObject *self)
{
interpid *id = (interpid *)self;
return PyUnicode_FromFormat("%" PRId64 "", id->id);
}
static PyObject *
interpid_int(PyObject *self)
{
interpid *id = (interpid *)self;
return PyLong_FromLongLong(id->id);
}
static PyNumberMethods interpid_as_number = {
0, /* nb_add */
0, /* nb_subtract */
0, /* nb_multiply */
0, /* nb_remainder */
0, /* nb_divmod */
0, /* nb_power */
0, /* nb_negative */
0, /* nb_positive */
0, /* nb_absolute */
0, /* nb_bool */
0, /* nb_invert */
0, /* nb_lshift */
0, /* nb_rshift */
0, /* nb_and */
0, /* nb_xor */
0, /* nb_or */
(unaryfunc)interpid_int, /* nb_int */
0, /* nb_reserved */
0, /* nb_float */
0, /* nb_inplace_add */
0, /* nb_inplace_subtract */
0, /* nb_inplace_multiply */
0, /* nb_inplace_remainder */
0, /* nb_inplace_power */
0, /* nb_inplace_lshift */
0, /* nb_inplace_rshift */
0, /* nb_inplace_and */
0, /* nb_inplace_xor */
0, /* nb_inplace_or */
0, /* nb_floor_divide */
0, /* nb_true_divide */
0, /* nb_inplace_floor_divide */
0, /* nb_inplace_true_divide */
(unaryfunc)interpid_int, /* nb_index */
};
static Py_hash_t
interpid_hash(PyObject *self)
{
interpid *id = (interpid *)self;
PyObject *obj = PyLong_FromLongLong(id->id);
if (obj == NULL) {
return -1;
}
Py_hash_t hash = PyObject_Hash(obj);
Py_DECREF(obj);
return hash;
}
static PyObject *
interpid_richcompare(PyObject *self, PyObject *other, int op)
{
if (op != Py_EQ && op != Py_NE) {
Py_RETURN_NOTIMPLEMENTED;
}
if (!PyObject_TypeCheck(self, &_PyInterpreterID_Type)) {
Py_RETURN_NOTIMPLEMENTED;
}
interpid *id = (interpid *)self;
int equal;
if (PyObject_TypeCheck(other, &_PyInterpreterID_Type)) {
interpid *otherid = (interpid *)other;
equal = (id->id == otherid->id);
}
else {
other = PyNumber_Long(other);
if (other == NULL) {
PyErr_Clear();
Py_RETURN_NOTIMPLEMENTED;
}
int64_t otherid = PyLong_AsLongLong(other);
Py_DECREF(other);
if (otherid == -1 && PyErr_Occurred() != NULL) {
return NULL;
}
if (otherid < 0) {
equal = 0;
}
else {
equal = (id->id == otherid);
}
}
if ((op == Py_EQ && equal) || (op == Py_NE && !equal)) {
Py_RETURN_TRUE;
}
Py_RETURN_FALSE;
}
PyDoc_STRVAR(interpid_doc,
"A interpreter ID identifies a interpreter and may be used as an int.");
PyTypeObject _PyInterpreterID_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"InterpreterID", /* tp_name */
sizeof(interpid), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)interpid_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_as_async */
(reprfunc)interpid_repr, /* tp_repr */
&interpid_as_number, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
interpid_hash, /* tp_hash */
0, /* tp_call */
(reprfunc)interpid_str, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
Py_TPFLAGS_LONG_SUBCLASS, /* tp_flags */
interpid_doc, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
interpid_richcompare, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
&PyLong_Type, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
interpid_new, /* tp_new */
};
PyObject *_PyInterpreterID_New(int64_t id)
{
return (PyObject *)newinterpid(&_PyInterpreterID_Type, id, 0);
}
PyObject *
_PyInterpreterState_GetIDObject(PyInterpreterState *interp)
{
if (_PyInterpreterState_IDInitref(interp) != 0) {
return NULL;
};
PY_INT64_T id = PyInterpreterState_GetID(interp);
if (id < 0) {
return NULL;
}
return (PyObject *)newinterpid(&_PyInterpreterID_Type, id, 0);
}
PyInterpreterState *
_PyInterpreterID_LookUp(PyObject *requested_id)
{
int64_t id;
if (PyObject_TypeCheck(requested_id, &_PyInterpreterID_Type)) {
id = ((interpid *)requested_id)->id;
}
else {
id = PyLong_AsLongLong(requested_id);
if (id == -1 && PyErr_Occurred() != NULL) {
return NULL;
}
assert(id <= INT64_MAX);
}
return _PyInterpreterState_LookUpID(id);
}
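The deleted Objects/interpreteridobject.c above wraps an int64_t interpreter ID in an int-like object. A hedged sketch of how an embedder could have exercised the API it exposed, assuming the pre-revert headers were still available (the function name below is hypothetical):

/* Hypothetical usage sketch for the removed InterpreterID API; it only
 * compiles against the pre-revert headers. */
#include <Python.h>
#include "interpreteridobject.h"

static int
show_main_interpreter_id(void)                 /* hypothetical helper */
{
    PyInterpreterState *interp = PyInterpreterState_Main();
    PyObject *idobj = _PyInterpreterState_GetIDObject(interp);
    if (idobj == NULL) {
        return -1;
    }
    /* InterpreterID subclasses int, so it prints as a plain integer. */
    PyObject_Print(idobj, stdout, 0);

    /* Round-trip: look the interpreter back up from the ID object. */
    PyInterpreterState *found = _PyInterpreterID_LookUp(idobj);
    Py_DECREF(idobj);
    return (found == interp) ? 0 : -1;
}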
@@ -5,7 +5,6 @@
 #include "pycore_pystate.h"
 #include "pycore_context.h"
 #include "frameobject.h"
-#include "interpreteridobject.h"
 #ifdef __cplusplus
 extern "C" {
@@ -1807,7 +1806,6 @@ _PyTypes_Init(void)
     INIT_TYPE(&PySeqIter_Type, "sequence iterator");
     INIT_TYPE(&PyCoro_Type, "coroutine");
     INIT_TYPE(&_PyCoroWrapper_Type, "coroutine wrapper");
-    INIT_TYPE(&_PyInterpreterID_Type, "interpreter ID");
     return _Py_INIT_OK();
 #undef INIT_TYPE
...
@@ -154,7 +154,6 @@
     <ClInclude Include="..\Include\internal\pycore_pystate.h" />
     <ClInclude Include="..\Include\internal\pycore_tupleobject.h" />
     <ClInclude Include="..\Include\internal\pycore_warnings.h" />
-    <ClInclude Include="..\Include\interpreteridobject.h" />
     <ClInclude Include="..\Include\intrcheck.h" />
     <ClInclude Include="..\Include\iterobject.h" />
     <ClInclude Include="..\Include\listobject.h" />
@@ -351,7 +350,6 @@
     <ClCompile Include="..\Objects\frameobject.c" />
     <ClCompile Include="..\Objects\funcobject.c" />
     <ClCompile Include="..\Objects\genobject.c" />
-    <ClCompile Include="..\Objects\interpreteridobject.c" />
     <ClCompile Include="..\Objects\iterobject.c" />
     <ClCompile Include="..\Objects\listobject.c" />
     <ClCompile Include="..\Objects\longobject.c" />
...
@@ -483,9 +483,6 @@
     <ClInclude Include="..\Include\namespaceobject.h">
       <Filter>Include</Filter>
     </ClInclude>
-    <ClInclude Include="..\Include\interpreteridobject.h">
-      <Filter>Include</Filter>
-    </ClInclude>
     <ClInclude Include="..\Modules\hashtable.h">
       <Filter>Modules</Filter>
     </ClInclude>
@@ -1046,9 +1043,6 @@
     <ClCompile Include="..\Objects\namespaceobject.c">
       <Filter>Objects</Filter>
     </ClCompile>
-    <ClCompile Include="..\Objects\interpreteridobject.c">
-      <Filter>Objects</Filter>
-    </ClCompile>
     <ClCompile Include="..\Modules\_opcode.c">
       <Filter>Modules</Filter>
     </ClCompile>
...
@@ -176,7 +176,7 @@ static void drop_gil(PyThreadState *tstate)
         &_PyRuntime.ceval.gil.last_holder)
         ) == tstate)
     {
-        RESET_GIL_DROP_REQUEST(tstate->interp);
+        RESET_GIL_DROP_REQUEST();
         /* NOTE: if COND_WAIT does not atomically start waiting when
            releasing the mutex, another thread can run through, take
            the GIL and drop it again, and reset the condition
@@ -213,7 +213,7 @@ static void take_gil(PyThreadState *tstate)
         if (timed_out &&
             _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil.locked) &&
             _PyRuntime.ceval.gil.switch_number == saved_switchnum) {
-            SET_GIL_DROP_REQUEST(tstate->interp);
+            SET_GIL_DROP_REQUEST();
         }
     }
 _ready:
@@ -239,10 +239,10 @@ _ready:
     MUTEX_UNLOCK(_PyRuntime.ceval.gil.switch_mutex);
 #endif
     if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST(tstate->interp);
+        RESET_GIL_DROP_REQUEST();
     }
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc(tstate->interp);
+        _PyEval_SignalAsyncExc();
     }
     MUTEX_UNLOCK(_PyRuntime.ceval.gil.mutex);
...
@@ -1460,32 +1460,8 @@ Py_EndInterpreter(PyThreadState *tstate)
     if (tstate->frame != NULL)
         Py_FatalError("Py_EndInterpreter: thread still has a frame");
-    // Mark as finalizing.
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_acquire_lock(interp->ceval.pending.lock, 1);
-    }
-    interp->finalizing = 1;
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_release_lock(interp->ceval.pending.lock);
-    }
-    // Wrap up existing threads.
     wait_for_thread_shutdown();
-    // Make any pending calls.
-    if (_Py_atomic_load_relaxed(
-            &(interp->ceval.pending.calls_to_do)))
-    {
-        // XXX Ensure that the interpreter is running in the current thread?
-        if (_Py_MakePendingCalls(interp) < 0) {
-            PyObject *exc, *val, *tb;
-            PyErr_Fetch(&exc, &val, &tb);
-            PyErr_BadInternalCall();
-            _PyErr_ChainExceptions(exc, val, tb);
-            PyErr_Print();
-        }
-    }
     call_py_exitfuncs(interp);
     if (tstate != interp->tstate_head || tstate->next != NULL)
...
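After the revert, Py_EndInterpreter() simply waits for non-daemon threads, runs the interpreter's exit functions, and tears the state down, with no per-interpreter pending-call drain. For reference, a hedged sketch of the surrounding lifecycle an embedder drives (standard public API, simplified error handling; the function name is hypothetical):

#include <Python.h>

/* Hedged sketch: the create/run/tear-down cycle whose shutdown path
 * Py_EndInterpreter() implements. */
void
run_in_subinterpreter(const char *code)        /* hypothetical helper */
{
    PyThreadState *main_tstate = PyThreadState_Get();

    PyThreadState *sub = Py_NewInterpreter();  /* switches to the new tstate */
    if (sub == NULL) {
        return;
    }
    PyRun_SimpleString(code);

    Py_EndInterpreter(sub);                    /* must be the current tstate */
    PyThreadState_Swap(main_tstate);           /* restore the previous state */
}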
@@ -133,19 +133,28 @@ PyInterpreterState_New(void)
         return NULL;
     }
-    memset(interp, 0, sizeof(*interp));
     interp->id_refcount = -1;
+    interp->id_mutex = NULL;
+    interp->modules = NULL;
+    interp->modules_by_index = NULL;
+    interp->sysdict = NULL;
+    interp->builtins = NULL;
+    interp->builtins_copy = NULL;
+    interp->tstate_head = NULL;
     interp->check_interval = 100;
-    interp->ceval.pending.lock = PyThread_allocate_lock();
-    if (interp->ceval.pending.lock == NULL) {
-        PyErr_SetString(PyExc_RuntimeError,
-                        "failed to create interpreter ceval pending mutex");
-        return NULL;
-    }
+    interp->num_threads = 0;
+    interp->pythread_stacksize = 0;
+    interp->codec_search_path = NULL;
+    interp->codec_search_cache = NULL;
+    interp->codec_error_registry = NULL;
+    interp->codecs_initialized = 0;
+    interp->fscodec_initialized = 0;
     interp->core_config = _PyCoreConfig_INIT;
     interp->config = _PyMainInterpreterConfig_INIT;
+    interp->importlib = NULL;
+    interp->import_func = NULL;
     interp->eval_frame = _PyEval_EvalFrameDefault;
+    interp->co_extra_user_count = 0;
 #ifdef HAVE_DLOPEN
 #if HAVE_DECL_RTLD_NOW
     interp->dlopenflags = RTLD_NOW;
@@ -153,10 +162,13 @@ PyInterpreterState_New(void)
     interp->dlopenflags = RTLD_LAZY;
 #endif
 #endif
-    if (_PyRuntime.main_thread == 0) {
-        _PyRuntime.main_thread = PyThread_get_thread_ident();
-    }
+#ifdef HAVE_FORK
+    interp->before_forkers = NULL;
+    interp->after_forkers_parent = NULL;
+    interp->after_forkers_child = NULL;
+#endif
+    interp->pyexitfunc = NULL;
+    interp->pyexitmodule = NULL;
     HEAD_LOCK();
     if (_PyRuntime.interpreters.next_id < 0) {
@@ -211,9 +223,6 @@ PyInterpreterState_Clear(PyInterpreterState *interp)
     Py_CLEAR(interp->after_forkers_parent);
     Py_CLEAR(interp->after_forkers_child);
 #endif
-    // XXX Once we have one allocator per interpreter (i.e.
-    // per-interpreter GC) we must ensure that all of the interpreter's
-    // objects have been cleaned up at the point.
 }
@@ -254,9 +263,6 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
     if (interp->id_mutex != NULL) {
         PyThread_free_lock(interp->id_mutex);
     }
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_free_lock(interp->ceval.pending.lock);
-    }
     PyMem_RawFree(interp);
 }
@@ -328,37 +334,26 @@ PyInterpreterState_GetID(PyInterpreterState *interp)
 }
-static PyInterpreterState *
-interp_look_up_id(PY_INT64_T requested_id)
+PyInterpreterState *
+_PyInterpreterState_LookUpID(PY_INT64_T requested_id)
 {
+    if (requested_id < 0)
+        goto error;
     PyInterpreterState *interp = PyInterpreterState_Head();
     while (interp != NULL) {
         PY_INT64_T id = PyInterpreterState_GetID(interp);
-        if (id < 0) {
+        if (id < 0)
             return NULL;
-        }
-        if (requested_id == id) {
+        if (requested_id == id)
             return interp;
-        }
         interp = PyInterpreterState_Next(interp);
     }
-    return NULL;
-}
-PyInterpreterState *
-_PyInterpreterState_LookUpID(PY_INT64_T requested_id)
-{
-    PyInterpreterState *interp = NULL;
-    if (requested_id >= 0) {
-        HEAD_UNLOCK();
-        interp = interp_look_up_id(requested_id);
-        HEAD_UNLOCK();
-    }
-    if (interp == NULL && !PyErr_Occurred()) {
-        PyErr_Format(PyExc_RuntimeError,
-                     "unrecognized interpreter ID %lld", requested_id);
-    }
-    return interp;
+error:
+    PyErr_Format(PyExc_RuntimeError,
+                 "unrecognized interpreter ID %lld", requested_id);
+    return NULL;
 }
@@ -403,7 +398,7 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp)
     int64_t refcount = interp->id_refcount;
     PyThread_release_lock(interp->id_mutex);
-    if (refcount == 0 && interp->requires_idref) {
+    if (refcount == 0) {
         // XXX Using the "head" thread isn't strictly correct.
         PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
         // XXX Possible GILState issues?
@@ -413,18 +408,6 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp)
     }
 }
-int
-_PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
-{
-    return interp->requires_idref;
-}
-void
-_PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
-{
-    interp->requires_idref = required ? 1 : 0;
-}
 _PyCoreConfig *
 _PyInterpreterState_GetCoreConfig(PyInterpreterState *interp)
 {
@@ -437,16 +420,6 @@ _PyInterpreterState_GetMainConfig(PyInterpreterState *interp)
     return &interp->config;
 }
-PyObject *
-_PyInterpreterState_GetMainModule(PyInterpreterState *interp)
-{
-    if (interp->modules == NULL) {
-        PyErr_SetString(PyExc_RuntimeError, "interpreter not initialized");
-        return NULL;
-    }
-    return PyMapping_GetItemString(interp->modules, "__main__");
-}
 /* Default implementation for _PyThreadState_GetFrame */
 static struct _frame *
 threadstate_getframe(PyThreadState *self)
@@ -899,7 +872,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
             p->async_exc = exc;
             HEAD_UNLOCK();
             Py_XDECREF(old_exc);
-            _PyEval_SignalAsyncExc(interp);
+            _PyEval_SignalAsyncExc();
             return 1;
         }
     }
@@ -1313,7 +1286,7 @@ _PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data)
     return 0;
 }
-static int
+static void
 _release_xidata(void *arg)
 {
     _PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg;
@@ -1321,8 +1294,30 @@ _release_xidata(void *arg)
         data->free(data->data);
     }
     Py_XDECREF(data->obj);
-    PyMem_Free(data);
-    return 0;
+}
+static void
+_call_in_interpreter(PyInterpreterState *interp,
+                     void (*func)(void *), void *arg)
+{
+    /* We would use Py_AddPendingCall() if it weren't specific to the
+     * main interpreter (see bpo-33608).  In the meantime we take a
+     * naive approach.
+     */
+    PyThreadState *save_tstate = NULL;
+    if (interp != _PyInterpreterState_Get()) {
+        // XXX Using the "head" thread isn't strictly correct.
+        PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
+        // XXX Possible GILState issues?
+        save_tstate = PyThreadState_Swap(tstate);
+    }
+    func(arg);
+    // Switch back.
+    if (save_tstate != NULL) {
+        PyThreadState_Swap(save_tstate);
+    }
 }
 void
@@ -1333,7 +1328,7 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
         return;
     }
-    // Get the original interpreter.
+    // Switch to the original interpreter.
     PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp);
     if (interp == NULL) {
         // The intepreter was already destroyed.
@@ -1342,24 +1337,9 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
         }
         return;
     }
-    // XXX There's an ever-so-slight race here...
-    if (interp->finalizing) {
-        // XXX Someone leaked some memory...
-        return;
-    }
     // "Release" the data and/or the object.
-    _PyCrossInterpreterData *copied = PyMem_Malloc(sizeof(_PyCrossInterpreterData));
-    if (copied == NULL) {
-        PyErr_SetString(PyExc_MemoryError,
-                        "Not enough memory to preserve cross-interpreter data");
-        PyErr_Print();
-        return;
-    }
-    memcpy(copied, data, sizeof(_PyCrossInterpreterData));
-    if (_Py_AddPendingCall(interp, 0, _release_xidata, copied) != 0) {
-        // XXX Queue full or couldn't get lock.  Try again somehow?
-    }
+    _call_in_interpreter(interp, _release_xidata, data);
 }
 PyObject *
@@ -1392,7 +1372,7 @@ _register_xidata(PyTypeObject *cls, crossinterpdatafunc getdata)
 static void _register_builtins_for_crossinterpreter_data(void);
 int
-_PyCrossInterpreterData_RegisterClass(PyTypeObject *cls,
+_PyCrossInterpreterData_Register_Class(PyTypeObject *cls,
                                        crossinterpdatafunc getdata)
 {
     if (!PyType_Check(cls)) {
...
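The restored _call_in_interpreter() above swaps in a thread state of the owning interpreter, runs the release function, and swaps back. From the caller's side, the cross-interpreter data APIs declared earlier combine into a produce/consume/release cycle; a hedged sketch (the function name is hypothetical, interpreter switching elided):

/* Hedged sketch: the typical lifecycle for cross-interpreter data,
 * using the APIs declared in pycore_pystate.h. */
static int
pass_object_between_interpreters(PyObject *obj)    /* hypothetical */
{
    _PyCrossInterpreterData data;
    if (_PyObject_GetCrossInterpreterData(obj, &data) != 0) {
        return -1;                 /* not shareable; exception already set */
    }

    /* ...switch to the target interpreter (not shown)... */
    PyObject *copy = _PyCrossInterpreterData_NewObject(&data);

    /* Back in the owning interpreter, release the payload; with the
     * reverted code this swaps thread states itself if needed. */
    _PyCrossInterpreterData_Release(&data);

    if (copy == NULL) {
        return -1;
    }
    Py_DECREF(copy);
    return 0;
}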
@@ -784,7 +784,9 @@ class PyBuildExt(build_ext):
         self.add(Extension('syslog', ['syslogmodule.c']))
         # Python interface to subinterpreter C-API.
-        self.add(Extension('_xxsubinterpreters', ['_xxsubinterpretersmodule.c']))
+        self.add(Extension('_xxsubinterpreters',
+                           ['_xxsubinterpretersmodule.c'],
+                           define_macros=[('Py_BUILD_CORE', '')]))
         #
         # Here ends the simple stuff.  From here on, modules need certain
...