Commit 5ac9e6ee authored by INADA Naoki, committed by GitHub

bpo-33597: Reduce PyGC_Head size (GH-7043)

parent 445f1b35
@@ -251,76 +251,88 @@ PyAPI_FUNC(PyVarObject *) _PyObject_GC_Resize(PyVarObject *, Py_ssize_t);
/* GC information is stored BEFORE the object structure. */
#ifndef Py_LIMITED_API
typedef union _gc_head {
struct {
union _gc_head *gc_next;
union _gc_head *gc_prev;
Py_ssize_t gc_refs;
} gc;
double dummy; /* force worst-case alignment */
typedef struct {
// Pointer to next object in the list.
// 0 means the object is not tracked
uintptr_t _gc_next;
// Pointer to previous object in the list.
// Lowest two bits are used for flags documented later.
uintptr_t _gc_prev;
} PyGC_Head;
extern PyGC_Head *_PyGC_generation0;
#define _Py_AS_GC(o) ((PyGC_Head *)(o)-1)
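For orientation (not part of the patch): a standalone sketch contrasting the removed layout and the new one. The union mirrors the old definition above, intptr_t stands in for Py_ssize_t, and the printed sizes assume a typical 64-bit build.

#include <stdint.h>
#include <stdio.h>

/* Old layout: two pointers plus a refcount word, with a union member
 * forcing worst-case alignment. */
typedef union old_gc_head {
    struct {
        union old_gc_head *gc_next;
        union old_gc_head *gc_prev;
        intptr_t gc_refs;          /* stands in for Py_ssize_t */
    } gc;
    double dummy;                  /* force worst-case alignment */
} OldGCHead;

/* New layout: two tagged words; flags live in the otherwise unused low bits. */
typedef struct {
    uintptr_t _gc_next;
    uintptr_t _gc_prev;
} NewGCHead;

int main(void)
{
    /* On a common 64-bit build this prints 24 and 16: one word saved
     * per tracked object, before any allocator rounding. */
    printf("old PyGC_Head: %zu bytes\n", sizeof(OldGCHead));
    printf("new PyGC_Head: %zu bytes\n", sizeof(NewGCHead));
    return 0;
}

Because every GC-tracked container object carries one of these headers, the saving applies to every tracked object.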
/* Bit flags for _gc_prev */
/* Bit 0 is set when tp_finalize is called */
#define _PyGC_REFS_MASK_FINALIZED (1 << 0)
/* The (N-1) most significant bits contain the gc state / refcount */
#define _PyGC_REFS_SHIFT (1)
#define _PyGC_REFS_MASK (((size_t) -1) << _PyGC_REFS_SHIFT)
#define _PyGCHead_REFS(g) ((g)->gc.gc_refs >> _PyGC_REFS_SHIFT)
#define _PyGCHead_SET_REFS(g, v) do { \
(g)->gc.gc_refs = ((g)->gc.gc_refs & ~_PyGC_REFS_MASK) \
| (((size_t)(v)) << _PyGC_REFS_SHIFT); \
#define _PyGC_PREV_MASK_FINALIZED (1)
/* Bit 1 is set when the object is in the generation that is currently being collected. */
#define _PyGC_PREV_MASK_COLLECTING (2)
/* The (N-2) most significant bits contain the real address. */
#define _PyGC_PREV_SHIFT (2)
#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
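To illustrate the encoding, a standalone sketch that packs and unpacks a _gc_prev-style word; the macro names are local stand-ins with the same values as the definitions above, and the refcount value is arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_FINALIZED   ((uintptr_t)1)        /* bit 0 */
#define MASK_COLLECTING  ((uintptr_t)2)        /* bit 1 */
#define PREV_SHIFT       2
#define PREV_MASK        (((uintptr_t)-1) << PREV_SHIFT)

int main(void)
{
    uintptr_t header[2];                       /* stand-in for a PyGC_Head */

    /* Between collections the upper bits hold the prev pointer; GC heads
     * are at least 4-byte aligned, so the two low bits are free for flags. */
    assert(((uintptr_t)header & ~PREV_MASK) == 0);
    uintptr_t prev_word = (uintptr_t)header | MASK_FINALIZED;
    printf("prev pointer: %p, finalized: %d\n",
           (void *)(prev_word & PREV_MASK),
           (int)(prev_word & MASK_FINALIZED));

    /* During a collection the same upper bits hold gc_refs instead. */
    uintptr_t refs = 5;
    prev_word = (prev_word & MASK_FINALIZED)
                | MASK_COLLECTING
                | (refs << PREV_SHIFT);
    printf("gc_refs: %lu\n", (unsigned long)(prev_word >> PREV_SHIFT));
    return 0;
}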
// The lowest bit of _gc_next is used for flags only inside the GC.
// It is always 0 outside of a collection.
#define _PyGCHead_NEXT(g) ((PyGC_Head*)(g)->_gc_next)
#define _PyGCHead_SET_NEXT(g, p) ((g)->_gc_next = (uintptr_t)(p))
// Lowest two bits of _gc_prev are used for _PyGC_PREV_MASK_* flags.
#define _PyGCHead_PREV(g) ((PyGC_Head*)((g)->_gc_prev & _PyGC_PREV_MASK))
#define _PyGCHead_SET_PREV(g, p) do { \
assert(((uintptr_t)p & ~_PyGC_PREV_MASK) == 0); \
(g)->_gc_prev = ((g)->_gc_prev & ~_PyGC_PREV_MASK) \
| ((uintptr_t)(p)); \
} while (0)
#define _PyGCHead_DECREF(g) ((g)->gc.gc_refs -= 1 << _PyGC_REFS_SHIFT)
#define _PyGCHead_FINALIZED(g) (((g)->gc.gc_refs & _PyGC_REFS_MASK_FINALIZED) != 0)
#define _PyGCHead_SET_FINALIZED(g, v) do { \
(g)->gc.gc_refs = ((g)->gc.gc_refs & ~_PyGC_REFS_MASK_FINALIZED) \
| (v != 0); \
} while (0)
#define _PyGCHead_FINALIZED(g) (((g)->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0)
#define _PyGCHead_SET_FINALIZED(g) ((g)->_gc_prev |= _PyGC_PREV_MASK_FINALIZED)
#define _PyGC_FINALIZED(o) _PyGCHead_FINALIZED(_Py_AS_GC(o))
#define _PyGC_SET_FINALIZED(o, v) _PyGCHead_SET_FINALIZED(_Py_AS_GC(o), v)
#define _PyGC_REFS(o) _PyGCHead_REFS(_Py_AS_GC(o))
#define _PyGC_REFS_UNTRACKED (-2)
#define _PyGC_REFS_REACHABLE (-3)
#define _PyGC_REFS_TENTATIVELY_UNREACHABLE (-4)
/* Tell the GC to track this object. NB: While the object is tracked the
* collector it must be safe to call the ob_traverse method. */
#define _PyGC_SET_FINALIZED(o) _PyGCHead_SET_FINALIZED(_Py_AS_GC(o))
/* Tell the GC to track this object.
*
* NB: While the object is tracked by the collector, it must be safe to call the
* ob_traverse method.
*
* Internal note: _PyGC_generation0->_gc_prev doesn't have any bit flags
* because it is not an object header. So we don't use _PyGCHead_PREV() and
* _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
*/
#define _PyObject_GC_TRACK(o) do { \
PyGC_Head *g = _Py_AS_GC(o); \
if (_PyGCHead_REFS(g) != _PyGC_REFS_UNTRACKED) \
if (g->_gc_next != 0) { \
Py_FatalError("GC object already tracked"); \
_PyGCHead_SET_REFS(g, _PyGC_REFS_REACHABLE); \
g->gc.gc_next = _PyGC_generation0; \
g->gc.gc_prev = _PyGC_generation0->gc.gc_prev; \
g->gc.gc_prev->gc.gc_next = g; \
_PyGC_generation0->gc.gc_prev = g; \
} \
assert((g->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0); \
PyGC_Head *last = (PyGC_Head*)(_PyGC_generation0->_gc_prev); \
_PyGCHead_SET_NEXT(last, g); \
_PyGCHead_SET_PREV(g, last); \
_PyGCHead_SET_NEXT(g, _PyGC_generation0); \
_PyGC_generation0->_gc_prev = (uintptr_t)g; \
} while (0);
/* Tell the GC to stop tracking this object.
* gc_next doesn't need to be set to NULL, but doing so is a good
* way to provoke memory errors if calling code is confused.
*
* Internal note: This may be called during a collection, so the
* _PyGC_PREV_MASK_COLLECTING bit must be cleared. The _PyGC_PREV_MASK_FINALIZED
* bit is kept.
*/
#define _PyObject_GC_UNTRACK(o) do { \
PyGC_Head *g = _Py_AS_GC(o); \
assert(_PyGCHead_REFS(g) != _PyGC_REFS_UNTRACKED); \
_PyGCHead_SET_REFS(g, _PyGC_REFS_UNTRACKED); \
g->gc.gc_prev->gc.gc_next = g->gc.gc_next; \
g->gc.gc_next->gc.gc_prev = g->gc.gc_prev; \
g->gc.gc_next = NULL; \
PyGC_Head *prev = _PyGCHead_PREV(g); \
PyGC_Head *next = _PyGCHead_NEXT(g); \
assert(next != NULL); \
_PyGCHead_SET_NEXT(prev, next); \
_PyGCHead_SET_PREV(next, prev); \
g->_gc_next = 0; \
g->_gc_prev &= _PyGC_PREV_MASK_FINALIZED; \
} while (0);
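For context, tracking and untracking are normally reached through the public wrappers PyObject_GC_Track() and PyObject_GC_UnTrack(), which ultimately invoke these macros (the UnTrack wrapper is visible further down in this diff). The sketch below shows the usual lifecycle of a GC-aware container type; it is a minimal illustration, not code from this patch, and the type and field names are made up.

#include <Python.h>

typedef struct {
    PyObject_HEAD
    PyObject *payload;              /* may participate in reference cycles */
} PairObject;

static int
Pair_traverse(PairObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->payload);        /* safe to call while the object is tracked */
    return 0;
}

static int
Pair_clear(PairObject *self)
{
    Py_CLEAR(self->payload);
    return 0;
}

static void
Pair_dealloc(PairObject *self)
{
    PyObject_GC_UnTrack(self);      /* untrack before tearing the object down */
    Pair_clear(self);
    PyObject_GC_Del(self);
}

static PyTypeObject Pair_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "example.Pair",
    .tp_basicsize = sizeof(PairObject),
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_traverse = (traverseproc)Pair_traverse,
    .tp_clear = (inquiry)Pair_clear,
    .tp_dealloc = (destructor)Pair_dealloc,
};

static PyObject *
Pair_new(PyObject *payload)
{
    PairObject *self = PyObject_GC_New(PairObject, &Pair_Type);
    if (self == NULL) {
        return NULL;
    }
    Py_XINCREF(payload);
    self->payload = payload;
    PyObject_GC_Track((PyObject *)self);   /* start participating in cyclic GC */
    return (PyObject *)self;
}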
/* True if the object is currently tracked by the GC. */
#define _PyObject_GC_IS_TRACKED(o) \
(_PyGC_REFS(o) != _PyGC_REFS_UNTRACKED)
#define _PyObject_GC_IS_TRACKED(o) (_Py_AS_GC(o)->_gc_next != 0)
/* True if the object may be tracked by the GC in the future, or already is.
This can be useful to implement some optimizations. */
@@ -36,6 +36,72 @@ module gc
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
#define GC_DEBUG (0) /* Enable more asserts */
#define GC_NEXT _PyGCHead_NEXT
#define GC_PREV _PyGCHead_PREV
// update_refs() sets this bit for all objects in the generation being
// collected. subtract_refs() and move_unreachable() use it to tell
// whether a visited object belongs to that generation.
//
// move_unreachable() removes this flag from reachable objects.
// Only unreachable objects have this flag.
//
// No object in the interpreter has this flag after a collection ends.
#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING
// The lowest bit of _gc_next is used for the UNREACHABLE flag.
//
// This flag indicates that the object is in the unreachable list built by
// move_unreachable().
//
// Although the flag is only needed inside move_unreachable(), that function
// doesn't clear it, to avoid an extra pass over the list;
// move_legacy_finalizers() removes it instead.
// Until then, the unreachable list is not a normal list and most gc_list_*
// functions can not be used on it.
#define NEXT_MASK_UNREACHABLE (1)
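The flag rides in bit 0 of an otherwise ordinary pointer, which works because GC heads are at least 4-byte aligned. A standalone sketch of the idea (Node stands in for PyGC_Head; the constant value matches the define above):

#include <stdint.h>
#include <stdio.h>

#define MASK_UNREACHABLE ((uintptr_t)1)

typedef struct node {
    uintptr_t next;                 /* pointer with bit 0 reserved for the flag */
    int id;
} Node;

int main(void)
{
    /* Build a small chain whose next words all carry the flag, the way
     * move_unreachable() links objects into the unreachable list. */
    Node head = {0, 0}, a = {0, 1}, b = {0, 2};
    head.next = (uintptr_t)&a | MASK_UNREACHABLE;
    a.next    = (uintptr_t)&b | MASK_UNREACHABLE;
    b.next    = (uintptr_t)&head | MASK_UNREACHABLE;

    /* Walk the chain, stripping the flag as move_legacy_finalizers() does,
     * so the list becomes a normal list again. */
    head.next &= ~MASK_UNREACHABLE;
    for (Node *n = (Node *)head.next; n != &head; ) {
        n->next &= ~MASK_UNREACHABLE;
        printf("object %d is back on a normal list\n", n->id);
        n = (Node *)n->next;
    }
    return 0;
}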
static inline int
gc_is_collecting(PyGC_Head *g)
{
return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
}
static inline void
gc_clear_collecting(PyGC_Head *g)
{
g->_gc_prev &= ~PREV_MASK_COLLECTING;
}
static inline Py_ssize_t
gc_get_refs(PyGC_Head *g)
{
return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
}
static inline void
gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
{
g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
| ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
}
static inline void
gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
{
g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
| PREV_MASK_COLLECTING
| ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
}
static inline void
gc_decref(PyGC_Head *g)
{
assert(gc_get_refs(g) > 0);
g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
}
/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)
@@ -63,104 +129,116 @@ _PyGC_Initialize(struct _gc_runtime_state *state)
#define _GEN_HEAD(n) (&state->generations[n].head)
struct gc_generation generations[NUM_GENERATIONS] = {
/* PyGC_Head, threshold, count */
{{{_GEN_HEAD(0), _GEN_HEAD(0), 0}}, 700, 0},
{{{_GEN_HEAD(1), _GEN_HEAD(1), 0}}, 10, 0},
{{{_GEN_HEAD(2), _GEN_HEAD(2), 0}}, 10, 0},
/* PyGC_Head, threshold, count */
{{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0},
{{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0},
{{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0},
};
for (int i = 0; i < NUM_GENERATIONS; i++) {
state->generations[i] = generations[i];
};
state->generation0 = GEN_HEAD(0);
struct gc_generation permanent_generation = {
{{&state->permanent_generation.head, &state->permanent_generation.head, 0}}, 0, 0
{(uintptr_t)&state->permanent_generation.head,
(uintptr_t)&state->permanent_generation.head}, 0, 0
};
state->permanent_generation = permanent_generation;
}
/*--------------------------------------------------------------------------
gc_refs values.
Between collections, every gc'ed object has one of two gc_refs values:
/*
_gc_prev values
---------------
GC_UNTRACKED
The initial state; objects returned by PyObject_GC_Malloc are in this
state. The object doesn't live in any generation list, and its
tp_traverse slot must not be called.
Between collections, _gc_prev is used as the back pointer of the doubly linked list.
GC_REACHABLE
The object lives in some generation list, and its tp_traverse is safe to
call. An object transitions to GC_REACHABLE when PyObject_GC_Track
is called.
Lowest two bits of _gc_prev are used for flags.
PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends
or _PyObject_GC_UNTRACK() is called.
During a collection, gc_refs can temporarily take on other states:
During a collection, _gc_prev is temporarily used for gc_refs, and the gc list
is singly linked until _gc_prev is restored.
>= 0
gc_refs
At the start of a collection, update_refs() copies the true refcount
to gc_refs, for each object in the generation being collected.
subtract_refs() then adjusts gc_refs so that it equals the number of
times an object is referenced directly from outside the generation
being collected.
gc_refs remains >= 0 throughout these steps.
GC_TENTATIVELY_UNREACHABLE
PREV_MASK_COLLECTING
Objects in the generation being collected are marked PREV_MASK_COLLECTING in
update_refs().
_gc_next values
---------------
_gc_next takes these values:
0
The object is not tracked
!= 0
Pointer to the next object in the GC list.
Additionally, the lowest bit is temporarily used for the
NEXT_MASK_UNREACHABLE flag described below.
NEXT_MASK_UNREACHABLE
move_unreachable() then moves objects not reachable (whether directly or
indirectly) from outside the generation into an "unreachable" set.
Objects that are found to be reachable have gc_refs set to GC_REACHABLE
again. Objects that are found to be unreachable have gc_refs set to
GC_TENTATIVELY_UNREACHABLE. It's "tentatively" because the pass doing
this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
transition back to GC_REACHABLE.
Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
for collection. If it's decided not to collect such an object (e.g.,
it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
#define GC_UNTRACKED _PyGC_REFS_UNTRACKED
#define GC_REACHABLE _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE _PyGC_REFS_TENTATIVELY_UNREACHABLE
indirectly) from outside the generation into an "unreachable" set and
sets this flag on them.
Objects that are found to be reachable have gc_refs set to 1.
If this flag is still set on such a reachable object, the object must be
in the "unreachable" set; the flag is then cleared and the object is moved
back to the "reachable" set.
#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
_PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)
move_legacy_finalizers() removes this flag from the objects in the
"unreachable" set.
*/
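To make the bookkeeping above concrete, here is a toy, self-contained simulation (not CPython code) of what update_refs(), subtract_refs(), and the reachability propagation in move_unreachable() compute; the four objects and their references are invented for the example.

#include <stdio.h>

/* A <-> B form a cycle and A also has one reference from outside the set;
 * C <-> D form a cycle with no outside references. */
enum { A, B, C, D, N };
static const char *names[N] = {"A", "B", "C", "D"};

/* edges[i][j] != 0 means object i holds a reference to object j. */
static const int edges[N][N] = {
    /* A */ {0, 1, 0, 0},
    /* B */ {1, 0, 0, 0},
    /* C */ {0, 0, 0, 1},
    /* D */ {0, 0, 1, 0},
};

int main(void)
{
    /* update_refs(): gc_refs starts out as the true refcount
     * (references from inside the set plus the external reference to A). */
    int refs[N] = {2, 1, 1, 1};

    /* subtract_refs(): drop every reference that comes from inside the set. */
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            refs[j] -= edges[i][j];
        }
    }

    /* move_unreachable(), simplified: objects whose count stayed positive
     * are reachable, and so is anything they reference. */
    int reachable[N] = {0};
    for (int changed = 1; changed; ) {
        changed = 0;
        for (int i = 0; i < N; i++) {
            if (!reachable[i] && refs[i] > 0) {
                reachable[i] = 1;
                changed = 1;
            }
            if (!reachable[i]) {
                continue;
            }
            for (int j = 0; j < N; j++) {
                if (edges[i][j] && !reachable[j]) {
                    reachable[j] = 1;
                    changed = 1;
                }
            }
        }
    }

    /* Prints: A and B survive, C and D are collected. */
    for (int i = 0; i < N; i++) {
        printf("%s: gc_refs=%d -> %s\n", names[i], refs[i],
               reachable[i] ? "reachable" : "unreachable");
    }
    return 0;
}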
/*** list functions ***/
static void
static inline void
gc_list_init(PyGC_Head *list)
{
list->gc.gc_prev = list;
list->gc.gc_next = list;
// A list header never carries flag bits, so the pointer can be
// assigned with a plain cast.
list->_gc_prev = (uintptr_t)list;
list->_gc_next = (uintptr_t)list;
}
static int
static inline int
gc_list_is_empty(PyGC_Head *list)
{
return (list->gc.gc_next == list);
return (list->_gc_next == (uintptr_t)list);
}
#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`. */
static void
static inline void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
node->gc.gc_next = list;
node->gc.gc_prev = list->gc.gc_prev;
node->gc.gc_prev->gc.gc_next = node;
list->gc.gc_prev = node;
PyGC_Head *last = (PyGC_Head *)list->_gc_prev;
// last <-> node
_PyGCHead_SET_PREV(node, last);
_PyGCHead_SET_NEXT(last, node);
// node <-> list
_PyGCHead_SET_NEXT(node, list);
list->_gc_prev = (uintptr_t)node;
}
#endif
/* Remove `node` from the gc list it's currently in. */
static void
static inline void
gc_list_remove(PyGC_Head *node)
{
node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
node->gc.gc_next = NULL; /* object is not currently tracked */
PyGC_Head *prev = GC_PREV(node);
PyGC_Head *next = GC_NEXT(node);
_PyGCHead_SET_NEXT(prev, next);
_PyGCHead_SET_PREV(next, prev);
node->_gc_next = 0; /* object is not currently tracked */
}
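These primitives can be exercised outside CPython. The following standalone mirror (simplified, illustrative names; no flag bits are actually set here) shows the invariants the real macros maintain: the head's words never carry flags, a stored prev pointer is masked on read, and a removed node gets _gc_next = 0 as its "not tracked" marker.

#include <assert.h>
#include <stdint.h>

/* Standalone mirror of the ring list above. The low two bits of the prev
 * word are reserved for flags, so stored pointers must be masked on read. */
typedef struct gch { uintptr_t next, prev; } GCH;

#define PREV_SHIFT 2
#define PREV_MASK  (((uintptr_t)-1) << PREV_SHIFT)
#define NEXT(g)        ((GCH *)(g)->next)
#define PREV(g)        ((GCH *)((g)->prev & PREV_MASK))
#define SET_NEXT(g, p) ((g)->next = (uintptr_t)(p))
#define SET_PREV(g, p) ((g)->prev = ((g)->prev & ~PREV_MASK) | (uintptr_t)(p))

static void list_init(GCH *list)
{
    list->next = list->prev = (uintptr_t)list;   /* empty ring: head only */
}

static void list_append(GCH *node, GCH *list)
{
    GCH *last = (GCH *)list->prev;               /* head carries no flag bits */
    SET_PREV(node, last);
    SET_NEXT(last, node);
    SET_NEXT(node, list);
    list->prev = (uintptr_t)node;
}

static void list_remove(GCH *node)
{
    GCH *prev = PREV(node);
    GCH *next = NEXT(node);
    SET_NEXT(prev, next);
    SET_PREV(next, prev);
    node->next = 0;                              /* "not tracked" marker */
}

int main(void)
{
    GCH head, a = {0, 0}, b = {0, 0};
    list_init(&head);
    list_append(&a, &head);
    list_append(&b, &head);
    assert(NEXT(&head) == &a && NEXT(&a) == &b && NEXT(&b) == &head);
    list_remove(&a);
    assert(NEXT(&head) == &b && PREV(&b) == &head && a.next == 0);
    return 0;
}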
/* Move `node` from the gc list it's currently in (which is not explicitly
@@ -170,30 +248,38 @@ gc_list_remove(PyGC_Head *node)
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
PyGC_Head *new_prev;
PyGC_Head *current_prev = node->gc.gc_prev;
PyGC_Head *current_next = node->gc.gc_next;
/* Unlink from current list. */
current_prev->gc.gc_next = current_next;
current_next->gc.gc_prev = current_prev;
PyGC_Head *from_prev = GC_PREV(node);
PyGC_Head *from_next = GC_NEXT(node);
_PyGCHead_SET_NEXT(from_prev, from_next);
_PyGCHead_SET_PREV(from_next, from_prev);
/* Relink at end of new list. */
new_prev = node->gc.gc_prev = list->gc.gc_prev;
new_prev->gc.gc_next = list->gc.gc_prev = node;
node->gc.gc_next = list;
// The list head never has flag bits set, so the macros can be skipped.
PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
_PyGCHead_SET_PREV(node, to_prev);
_PyGCHead_SET_NEXT(to_prev, node);
list->_gc_prev = (uintptr_t)node;
_PyGCHead_SET_NEXT(node, list);
}
/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
PyGC_Head *tail;
assert(from != to);
if (!gc_list_is_empty(from)) {
tail = to->gc.gc_prev;
tail->gc.gc_next = from->gc.gc_next;
tail->gc.gc_next->gc.gc_prev = tail;
to->gc.gc_prev = from->gc.gc_prev;
to->gc.gc_prev->gc.gc_next = to;
PyGC_Head *to_tail = GC_PREV(to);
PyGC_Head *from_head = GC_NEXT(from);
PyGC_Head *from_tail = GC_PREV(from);
assert(from_head != from);
assert(from_tail != from);
_PyGCHead_SET_NEXT(to_tail, from_head);
_PyGCHead_SET_PREV(from_head, to_tail);
_PyGCHead_SET_NEXT(from_tail, to);
_PyGCHead_SET_PREV(to, from_tail);
}
gc_list_init(from);
}
@@ -203,7 +289,7 @@ gc_list_size(PyGC_Head *list)
{
PyGC_Head *gc;
Py_ssize_t n = 0;
for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
n++;
}
return n;
@@ -216,7 +302,7 @@ static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
PyGC_Head *gc;
for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
PyObject *op = FROM_GC(gc);
if (op != py_list) {
if (PyList_Append(py_list, op)) {
@@ -227,20 +313,39 @@ append_objects(PyObject *py_list, PyGC_Head *gc_list)
return 0;
}
#if GC_DEBUG
// validate_list() checks list consistency, and also serves as documentation
// of when expected_mask is supposed to be set or clear.
static void
validate_list(PyGC_Head *head, uintptr_t expected_mask)
{
PyGC_Head *prev = head;
PyGC_Head *gc = GC_NEXT(head);
while (gc != head) {
assert(GC_NEXT(gc) != NULL);
assert(GC_PREV(gc) == prev);
assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);
prev = gc;
gc = GC_NEXT(gc);
}
assert(prev == GC_PREV(head));
}
#else
#define validate_list(x,y) do{}while(0)
#endif
/*** end of list stuff ***/
/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 for all objects
* in containers, and is GC_REACHABLE for all tracked gc objects not in
* containers.
/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 and
* PREV_MASK_COLLECTING bit is set for all objects in containers.
*/
static void
update_refs(PyGC_Head *containers)
{
PyGC_Head *gc = containers->gc.gc_next;
for (; gc != containers; gc = gc->gc.gc_next) {
assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
_PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
PyGC_Head *gc = GC_NEXT(containers);
for (; gc != containers; gc = GC_NEXT(gc)) {
gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
/* Python's cyclic gc should never see an incoming refcount
* of 0: if something decref'ed to 0, it should have been
* deallocated immediately at that time.
@@ -259,7 +364,7 @@ update_refs(PyGC_Head *containers)
* so serious that maybe this should be a release-build
* check instead of an assert?
*/
assert(_PyGCHead_REFS(gc) != 0);
assert(gc_get_refs(gc) != 0);
}
}
@@ -274,9 +379,9 @@ visit_decref(PyObject *op, void *data)
* generation being collected, which can be recognized
* because only they have positive gc_refs.
*/
assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
if (_PyGCHead_REFS(gc) > 0)
_PyGCHead_DECREF(gc);
if (gc_is_collecting(gc)) {
gc_decref(gc);
}
}
return 0;
}
@@ -290,8 +395,8 @@ static void
subtract_refs(PyGC_Head *containers)
{
traverseproc traverse;
PyGC_Head *gc = containers->gc.gc_next;
for (; gc != containers; gc=gc->gc.gc_next) {
PyGC_Head *gc = GC_NEXT(containers);
for (; gc != containers; gc = GC_NEXT(gc)) {
traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
(void) traverse(FROM_GC(gc),
(visitproc)visit_decref,
@@ -303,71 +408,84 @@ subtract_refs(PyGC_Head *containers)
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
if (PyObject_IS_GC(op)) {
PyGC_Head *gc = AS_GC(op);
const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);
if (!PyObject_IS_GC(op)) {
return 0;
}
if (gc_refs == 0) {
/* This is in move_unreachable's 'young' list, but
* the traversal hasn't yet gotten to it. All
* we need to do is tell move_unreachable that it's
* reachable.
*/
_PyGCHead_SET_REFS(gc, 1);
}
else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
/* This had gc_refs = 0 when move_unreachable got
* to it, but turns out it's reachable after all.
* Move it back to move_unreachable's 'young' list,
* and move_unreachable will eventually get to it
* again.
*/
gc_list_move(gc, reachable);
_PyGCHead_SET_REFS(gc, 1);
}
/* Else there's nothing to do.
* If gc_refs > 0, it must be in move_unreachable's 'young'
* list, and move_unreachable will eventually get to it.
* If gc_refs == GC_REACHABLE, it's either in some other
* generation so we don't care about it, or move_unreachable
* already dealt with it.
* If gc_refs == GC_UNTRACKED, it must be ignored.
PyGC_Head *gc = AS_GC(op);
const Py_ssize_t gc_refs = gc_get_refs(gc);
// Ignore untracked objects and objects in other generation.
if (gc->_gc_next == 0 || !gc_is_collecting(gc)) {
return 0;
}
if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
/* This had gc_refs = 0 when move_unreachable got
* to it, but turns out it's reachable after all.
* Move it back to move_unreachable's 'young' list,
* and move_unreachable will eventually get to it
* again.
*/
// Manually unlink gc from the unreachable list; the list helpers can't be
// used while the surrounding _gc_next words carry NEXT_MASK_UNREACHABLE.
PyGC_Head *prev = GC_PREV(gc);
PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
assert(prev->_gc_next & NEXT_MASK_UNREACHABLE);
assert(next->_gc_next & NEXT_MASK_UNREACHABLE);
prev->_gc_next = gc->_gc_next; // copy NEXT_MASK_UNREACHABLE
_PyGCHead_SET_PREV(next, prev);
gc_list_append(gc, reachable);
gc_set_refs(gc, 1);
}
else if (gc_refs == 0) {
/* This is in move_unreachable's 'young' list, but
* the traversal hasn't yet gotten to it. All
* we need to do is tell move_unreachable that it's
* reachable.
*/
else {
assert(gc_refs > 0
|| gc_refs == GC_REACHABLE
|| gc_refs == GC_UNTRACKED);
}
gc_set_refs(gc, 1);
}
/* Else there's nothing to do.
* If gc_refs > 0, it must be in move_unreachable's 'young'
* list, and move_unreachable will eventually get to it.
*/
else {
assert(gc_refs > 0);
}
return 0;
}
/* Move the unreachable objects from young to unreachable. After this,
* all objects in young have gc_refs = GC_REACHABLE, and all objects in
* unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE. All tracked
* gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
* no object in young has the PREV_MASK_COLLECTING flag and every object in
* unreachable has it.
* All objects in young after this are directly or indirectly reachable
* from outside the original young; and all objects in unreachable are
* not.
*
* This function restores the _gc_prev pointers; young and unreachable are
* doubly linked lists once it returns.
* However, _gc_next in the unreachable list still carries the
* NEXT_MASK_UNREACHABLE flag, so the gc_list_* functions can not be used on
* unreachable until the flag is removed.
*/
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
PyGC_Head *gc = young->gc.gc_next;
// Previous element in the young list, used to restore gc_prev.
PyGC_Head *prev = young;
PyGC_Head *gc = GC_NEXT(young);
/* Invariants: all objects "to the left" of us in young have gc_refs
* = GC_REACHABLE, and are indeed reachable (directly or indirectly)
* from outside the young list as it was at entry. All other objects
* from the original young "to the left" of us are in unreachable now,
* and have gc_refs = GC_TENTATIVELY_UNREACHABLE. All objects to the
/* Invariants: all objects "to the left" of us in young are reachable
* (directly or indirectly) from outside the young list as it was at entry.
*
* All other objects from the original young "to the left" of us are in
* unreachable now, and have NEXT_MASK_UNREACHABLE. All objects to the
* left of us in 'young' now have been scanned, and no objects here
* or to the right have been scanned yet.
*/
while (gc != young) {
PyGC_Head *next;
if (_PyGCHead_REFS(gc)) {
if (gc_get_refs(gc)) {
/* gc is definitely reachable from outside the
* original 'young'. Mark it as such, and traverse
* its pointers to find any other objects that may
@@ -378,15 +496,17 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
*/
PyObject *op = FROM_GC(gc);
traverseproc traverse = Py_TYPE(op)->tp_traverse;
assert(_PyGCHead_REFS(gc) > 0);
_PyGCHead_SET_REFS(gc, GC_REACHABLE);
assert(gc_get_refs(gc) > 0);
// NOTE: visit_reachable may change gc->_gc_next when
// young->_gc_prev == gc. Don't do gc = GC_NEXT(gc) before!
(void) traverse(op,
(visitproc)visit_reachable,
(void *)young);
next = gc->gc.gc_next;
if (PyTuple_CheckExact(op)) {
_PyTuple_MaybeUntrack(op);
}
(visitproc)visit_reachable,
(void *)young);
// Relink gc_prev to the previous element.
_PyGCHead_SET_PREV(gc, prev);
// gc is no longer in the COLLECTING state after this point.
gc_clear_collecting(gc);
prev = gc;
}
else {
/* This *may* be unreachable. To make progress,
@@ -396,9 +516,37 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
* visit_reachable will eventually move gc back into
* young if that's so, and we'll see it again.
*/
next = gc->gc.gc_next;
gc_list_move(gc, unreachable);
_PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
// Move gc to unreachable.
// No need to update gc->next->prev, because the list is singly linked here.
prev->_gc_next = gc->_gc_next;
// We can't use gc_list_append() here because we need to set the
// NEXT_MASK_UNREACHABLE flag on the links.
PyGC_Head *last = GC_PREV(unreachable);
// NOTE: Since all objects in the unreachable set have the
// NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
// This may also set the flag on the unreachable list head itself;
// move_legacy_finalizers() takes care of clearing it.
last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
_PyGCHead_SET_PREV(gc, last);
gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
unreachable->_gc_prev = (uintptr_t)gc;
}
gc = (PyGC_Head*)prev->_gc_next;
}
// young->_gc_prev must point to the last element remaining in the list.
young->_gc_prev = (uintptr_t)prev;
}
static void
untrack_tuples(PyGC_Head *head)
{
PyGC_Head *next, *gc = GC_NEXT(head);
while (gc != head) {
PyObject *op = FROM_GC(gc);
next = GC_NEXT(gc);
if (PyTuple_CheckExact(op)) {
_PyTuple_MaybeUntrack(op);
}
gc = next;
}
@@ -408,12 +556,13 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
static void
untrack_dicts(PyGC_Head *head)
{
PyGC_Head *next, *gc = head->gc.gc_next;
PyGC_Head *next, *gc = GC_NEXT(head);
while (gc != head) {
PyObject *op = FROM_GC(gc);
next = gc->gc.gc_next;
if (PyDict_CheckExact(op))
next = GC_NEXT(gc);
if (PyDict_CheckExact(op)) {
_PyDict_MaybeUntrack(op);
}
gc = next;
}
}
@@ -426,27 +575,29 @@ has_legacy_finalizer(PyObject *op)
}
/* Move the objects in unreachable with tp_del slots into `finalizers`.
* Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
* objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
*
* This function also removes the NEXT_MASK_UNREACHABLE flag
* from _gc_next in the unreachable list.
*/
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
PyGC_Head *gc;
PyGC_Head *next;
PyGC_Head *gc, *next;
unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;
/* March over unreachable. Move objects with finalizers into
* `finalizers`.
*/
for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
PyObject *op = FROM_GC(gc);
assert(IS_TENTATIVELY_UNREACHABLE(op));
next = gc->gc.gc_next;
assert(gc->_gc_next & NEXT_MASK_UNREACHABLE);
gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
next = (PyGC_Head*)gc->_gc_next;
if (has_legacy_finalizer(op)) {
gc_clear_collecting(gc);
gc_list_move(gc, finalizers);
_PyGCHead_SET_REFS(gc, GC_REACHABLE);
}
}
}
@@ -456,10 +607,10 @@ static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
if (PyObject_IS_GC(op)) {
if (IS_TENTATIVELY_UNREACHABLE(op)) {
PyGC_Head *gc = AS_GC(op);
PyGC_Head *gc = AS_GC(op);
if (gc_is_collecting(gc)) {
gc_list_move(gc, tolist);
_PyGCHead_SET_REFS(gc, GC_REACHABLE);
gc_clear_collecting(gc);
}
}
return 0;
@@ -472,8 +623,8 @@ static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
traverseproc traverse;
PyGC_Head *gc = finalizers->gc.gc_next;
for (; gc != finalizers; gc = gc->gc.gc_next) {
PyGC_Head *gc = GC_NEXT(finalizers);
for (; gc != finalizers; gc = GC_NEXT(gc)) {
/* Note that the finalizers list may grow during this. */
traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
(void) traverse(FROM_GC(gc),
@@ -513,12 +664,11 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
* make another pass over wrcb_to_call, invoking callbacks, after this
* pass completes.
*/
for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
PyWeakReference **wrlist;
op = FROM_GC(gc);
assert(IS_TENTATIVELY_UNREACHABLE(op));
next = gc->gc.gc_next;
next = GC_NEXT(gc);
if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
continue;
@@ -572,9 +722,9 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
* to imagine how calling it later could create a problem for us. wr
* is moved to wrcb_to_call in this case.
*/
if (IS_TENTATIVELY_UNREACHABLE(wr))
if (gc_is_collecting(AS_GC(wr))) {
continue;
assert(IS_REACHABLE(wr));
}
/* Create a new reference so that wr can't go away
* before we can process it again.
@@ -597,9 +747,8 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
PyObject *temp;
PyObject *callback;
gc = wrcb_to_call.gc.gc_next;
gc = (PyGC_Head*)wrcb_to_call._gc_next;
op = FROM_GC(gc);
assert(IS_REACHABLE(op));
assert(PyWeakref_Check(op));
wr = (PyWeakReference *)op;
callback = wr->wr_callback;
@@ -624,12 +773,13 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
* ours).
*/
Py_DECREF(op);
if (wrcb_to_call.gc.gc_next == gc) {
if (wrcb_to_call._gc_next == (uintptr_t)gc) {
/* object is still alive -- move it */
gc_list_move(gc, old);
}
else
else {
++num_freed;
}
}
return num_freed;
@@ -652,7 +802,7 @@ debug_cycle(const char *msg, PyObject *op)
static void
handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
PyGC_Head *gc = finalizers->gc.gc_next;
PyGC_Head *gc = GC_NEXT(finalizers);
assert(!PyErr_Occurred());
if (_PyRuntime.gc.garbage == NULL) {
@@ -660,7 +810,7 @@ handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
if (_PyRuntime.gc.garbage == NULL)
Py_FatalError("gc couldn't create gc.garbage list");
}
for (; gc != finalizers; gc = gc->gc.gc_next) {
for (; gc != finalizers; gc = GC_NEXT(gc)) {
PyObject *op = FROM_GC(gc);
if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
@@ -695,13 +845,13 @@ finalize_garbage(PyGC_Head *collectable)
gc_list_init(&seen);
while (!gc_list_is_empty(collectable)) {
PyGC_Head *gc = collectable->gc.gc_next;
PyGC_Head *gc = GC_NEXT(collectable);
PyObject *op = FROM_GC(gc);
gc_list_move(gc, &seen);
if (!_PyGCHead_FINALIZED(gc) &&
PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
(finalize = Py_TYPE(op)->tp_finalize) != NULL) {
_PyGCHead_SET_FINALIZED(gc, 1);
_PyGCHead_SET_FINALIZED(gc);
Py_INCREF(op);
finalize(op);
assert(!PyErr_Occurred());
@@ -717,30 +867,26 @@ finalize_garbage(PyGC_Head *collectable)
static int
check_garbage(PyGC_Head *collectable)
{
int ret = 0;
PyGC_Head *gc;
for (gc = collectable->gc.gc_next; gc != collectable;
gc = gc->gc.gc_next) {
_PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
assert(_PyGCHead_REFS(gc) != 0);
for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
// Reuse _gc_prev for gc_refs, breaking the back links again.
gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));
assert(gc_get_refs(gc) != 0);
}
subtract_refs(collectable);
for (gc = collectable->gc.gc_next; gc != collectable;
gc = gc->gc.gc_next) {
assert(_PyGCHead_REFS(gc) >= 0);
if (_PyGCHead_REFS(gc) != 0)
return -1;
}
return 0;
}
static void
revive_garbage(PyGC_Head *collectable)
{
PyGC_Head *gc;
for (gc = collectable->gc.gc_next; gc != collectable;
gc = gc->gc.gc_next) {
_PyGCHead_SET_REFS(gc, GC_REACHABLE);
PyGC_Head *prev = collectable;
for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
assert(gc_get_refs(gc) >= 0);
if (gc_get_refs(gc) != 0) {
ret = -1;
}
// Restore gc_prev here.
_PyGCHead_SET_PREV(gc, prev);
gc_clear_collecting(gc);
prev = gc;
}
return ret;
}
/* Break reference cycles by clearing the containers involved. This is
@@ -754,9 +900,11 @@ delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
assert(!PyErr_Occurred());
while (!gc_list_is_empty(collectable)) {
PyGC_Head *gc = collectable->gc.gc_next;
PyGC_Head *gc = GC_NEXT(collectable);
PyObject *op = FROM_GC(gc);
assert(Py_REFCNT(FROM_GC(gc)) > 0);
if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
assert(_PyRuntime.gc.garbage != NULL);
if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
@@ -775,10 +923,9 @@ delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
Py_DECREF(op);
}
}
if (collectable->gc.gc_next == gc) {
if (GC_NEXT(collectable) == gc) {
/* object is still alive, move it, it may die later */
gc_list_move(gc, old);
_PyGCHead_SET_REFS(gc, GC_REACHABLE);
}
}
}
@@ -857,12 +1004,14 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
else
old = young;
validate_list(young, 0);
validate_list(old, 0);
/* Using ob_refcnt and gc_refs, calculate which objects in the
* container set are reachable from outside the set (i.e., have a
* refcount greater than 0 when all the references within the
* set are taken into account).
*/
update_refs(young);
update_refs(young); // gc_prev is used for gc_refs
subtract_refs(young);
/* Leave everything reachable from outside young in young, and move
@@ -872,8 +1021,10 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
* so it's more efficient to move the unreachable things.
*/
gc_list_init(&unreachable);
move_unreachable(young, &unreachable);
move_unreachable(young, &unreachable); // gc_prev is pointer again
validate_list(young, 0);
untrack_tuples(young);
/* Move reachable objects to next generation. */
if (young != old) {
if (generation == NUM_GENERATIONS - 2) {
@@ -893,6 +1044,8 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
* legacy finalizers (e.g. tp_del) can't safely be deleted.
*/
gc_list_init(&finalizers);
// NEXT_MASK_UNREACHABLE is cleared here.
// After move_legacy_finalizers(), unreachable is a normal list again.
move_legacy_finalizers(&unreachable, &finalizers);
/* finalizers contains the unreachable objects with a legacy finalizer;
* unreachable objects reachable *from* those are also uncollectable,
@@ -900,11 +1053,13 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
*/
move_legacy_finalizer_reachable(&finalizers);
validate_list(&finalizers, 0);
validate_list(&unreachable, PREV_MASK_COLLECTING);
/* Collect statistics on collectable objects found and print
* debugging information.
*/
for (gc = unreachable.gc.gc_next; gc != &unreachable;
gc = gc->gc.gc_next) {
for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
m++;
if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
debug_cycle("collectable", FROM_GC(gc));
@@ -914,11 +1069,13 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
/* Clear weakrefs and invoke callbacks as necessary. */
m += handle_weakrefs(&unreachable, old);
validate_list(old, 0);
validate_list(&unreachable, PREV_MASK_COLLECTING);
/* Call tp_finalize on objects which have one. */
finalize_garbage(&unreachable);
if (check_garbage(&unreachable)) {
revive_garbage(&unreachable);
if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here
gc_list_merge(&unreachable, old);
}
else {
@@ -931,9 +1088,7 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
/* Collect statistics on uncollectable objects found and print
* debugging information. */
for (gc = finalizers.gc.gc_next;
gc != &finalizers;
gc = gc->gc.gc_next) {
for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
n++;
if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
debug_cycle("uncollectable", FROM_GC(gc));
@@ -956,6 +1111,7 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
* this if they insist on creating this type of structure.
*/
handle_legacy_finalizers(&finalizers, old);
validate_list(old, 0);
/* Clear free list only during the collection of the highest
* generation */
@@ -1263,7 +1419,7 @@ gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
PyGC_Head *gc;
PyObject *obj;
traverseproc traverse;
for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
obj = FROM_GC(gc);
traverse = Py_TYPE(obj)->tp_traverse;
if (obj == objs || obj == resultlist)
@@ -1423,7 +1579,7 @@ gc_is_tracked(PyObject *module, PyObject *obj)
{
PyObject *result;
if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
result = Py_True;
else
result = Py_False;
@@ -1696,8 +1852,9 @@ PyObject_GC_UnTrack(void *op)
/* Obscure: the Py_TRASHCAN mechanism requires that we be able to
* call PyObject_GC_UnTrack twice on an object.
*/
if (IS_TRACKED(op))
if (_PyObject_GC_IS_TRACKED(op)) {
_PyObject_GC_UNTRACK(op);
}
}
static PyObject *
......@@ -1715,8 +1872,9 @@ _PyObject_GC_Alloc(int use_calloc, size_t basicsize)
g = (PyGC_Head *)PyObject_Malloc(size);
if (g == NULL)
return PyErr_NoMemory();
g->gc.gc_refs = 0;
_PyGCHead_SET_REFS(g, GC_UNTRACKED);
assert(((uintptr_t)g & 3) == 0); // g must be aligned to a 4-byte boundary
g->_gc_next = 0;
g->_gc_prev = 0;
_PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
_PyRuntime.gc.enabled &&
@@ -1774,7 +1932,7 @@ _PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
PyGC_Head *g = AS_GC(op);
assert(!IS_TRACKED(op));
assert(!_PyObject_GC_IS_TRACKED(op));
if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
return (PyVarObject *)PyErr_NoMemory();
g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
@@ -1789,8 +1947,9 @@ void
PyObject_GC_Del(void *op)
{
PyGC_Head *g = AS_GC(op);
if (IS_TRACKED(op))
if (_PyObject_GC_IS_TRACKED(op)) {
gc_list_remove(g);
}
if (_PyRuntime.gc.generations[0].count > 0) {
_PyRuntime.gc.generations[0].count--;
}
@@ -284,8 +284,9 @@ PyObject_CallFinalizer(PyObject *self)
return;
tp->tp_finalize(self);
if (PyType_IS_GC(tp))
_PyGC_SET_FINALIZED(self, 1);
if (PyType_IS_GC(tp)) {
_PyGC_SET_FINALIZED(self);
}
}
int
@@ -2095,7 +2096,7 @@ _PyTrash_deposit_object(PyObject *op)
assert(PyObject_IS_GC(op));
assert(!_PyObject_GC_IS_TRACKED(op));
assert(op->ob_refcnt == 0);
_Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *)_PyRuntime.gc.trash_delete_later;
_PyGCHead_SET_PREV(_Py_AS_GC(op), _PyRuntime.gc.trash_delete_later);
_PyRuntime.gc.trash_delete_later = op;
}
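What _PyTrash_deposit_object() relies on here: the object is already untracked and has a refcount of zero, so its GC header is free to be reused as scratch space, and _gc_prev temporarily holds the link of a singly linked "delete later" stack. A standalone sketch of that reuse (names invented, PyGC_Head modeled as two plain words):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { uintptr_t next, prev; } GCHead;   /* stand-in for PyGC_Head */
typedef struct { GCHead gc; int value; } Obj;      /* header precedes the object */

static Obj *delete_later = NULL;                   /* head of the deferred stack */

static void deposit(Obj *op)
{
    /* The object is untracked and about to die, so its prev word is free:
     * use it to remember the previous head of the deferred-deletion stack. */
    assert(op->gc.next == 0);                      /* not tracked */
    op->gc.prev = (uintptr_t)delete_later;
    delete_later = op;
}

static Obj *pop_deposited(void)
{
    Obj *op = delete_later;
    if (op != NULL) {
        delete_later = (Obj *)op->gc.prev;         /* follow the stashed link */
    }
    return op;
}

int main(void)
{
    Obj a = {{0, 0}, 1}, b = {{0, 0}, 2};
    deposit(&a);
    deposit(&b);
    assert(pop_deposited() == &b);                 /* LIFO, like the trashcan */
    assert(pop_deposited() == &a);
    assert(pop_deposited() == NULL);
    return 0;
}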
@@ -2107,7 +2108,7 @@ _PyTrash_thread_deposit_object(PyObject *op)
assert(PyObject_IS_GC(op));
assert(!_PyObject_GC_IS_TRACKED(op));
assert(op->ob_refcnt == 0);
_Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *) tstate->trash_delete_later;
_PyGCHead_SET_PREV(_Py_AS_GC(op), tstate->trash_delete_later);
tstate->trash_delete_later = op;
}
@@ -2122,7 +2123,7 @@ _PyTrash_destroy_chain(void)
destructor dealloc = Py_TYPE(op)->tp_dealloc;
_PyRuntime.gc.trash_delete_later =
(PyObject*) _Py_AS_GC(op)->gc.gc_prev;
(PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
/* Call the deallocator directly. This used to try to
* fool Py_DECREF into calling it indirectly, but
@@ -2160,7 +2161,7 @@ _PyTrash_thread_destroy_chain(void)
destructor dealloc = Py_TYPE(op)->tp_dealloc;
tstate->trash_delete_later =
(PyObject*) _Py_AS_GC(op)->gc.gc_prev;
(PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
/* Call the deallocator directly. This used to try to
* fool Py_DECREF into calling it indirectly, but