Commit 847630cc authored by Rudi Chen

Implement PyPy's finalization ordering algorithm.

- Call all finalizers properly, in an order in which it is safe to do so.
- Make finalizers work together with weak reference callbacks - namely,
  both should be able to get called, and weakref callbacks should be
  called before finalizers.
- Old-style classes are also supported, but since old-style classes have no
  tp_del slot, finding a finalizer would require an attribute lookup. Doing
  that lookup every time in the GC would be too slow, so we just assume all
  old-style classes have an ordered finalizer.
parent debf2077
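
As context for the diff below, here is a minimal standalone sketch of the two-pass ordering idea. All names in this sketch are invented for illustration and are not the Pyston API; the actual implementation below drives the same state transitions through the GC's TraceStack rather than through recursion.

// Standalone sketch of PyPy-style finalization ordering (illustrative only).
#include <cstdio>
#include <vector>

enum class State { UNREACHABLE, TEMPORARY, REACHABLE_FROM_FINALIZER, ALIVE };

struct Node {
    const char* name;
    State state;
    std::vector<Node*> refs; // outgoing references
    explicit Node(const char* name) : name(name), state(State::UNREACHABLE) {}
};

// Pass 1: everything reachable from a dead finalizable object becomes TEMPORARY;
// anything already claimed by an earlier finalizable object becomes ALIVE (deferred).
static void findReachable(Node* n) {
    if (n->state == State::UNREACHABLE)
        n->state = State::TEMPORARY;
    else if (n->state == State::REACHABLE_FROM_FINALIZER)
        n->state = State::ALIVE;
    else
        return;
    for (Node* child : n->refs)
        findReachable(child);
}

// Pass 2: the TEMPORARY marks from pass 1 become permanent REACHABLE_FROM_FINALIZER marks.
static void removeTemporaries(Node* n) {
    if (n->state != State::TEMPORARY)
        return;
    n->state = State::REACHABLE_FROM_FINALIZER;
    for (Node* child : n->refs)
        removeTemporaries(child);
}

int main() {
    // Two dead objects, both with ordered finalizers, where A references B:
    // A's finalizer may still use B, so B must survive this collection.
    Node a("A"), b("B");
    a.refs.push_back(&b);

    std::vector<Node*> with_finalizers = { &b, &a };
    std::vector<Node*> marked;
    for (Node* n : with_finalizers) {
        if (n->state != State::UNREACHABLE)
            continue; // already reachable from another finalizable object
        marked.push_back(n);
        findReachable(n);
        removeTemporaries(n);
    }

    // Objects left in REACHABLE_FROM_FINALIZER get finalized this cycle; ALIVE ones
    // are kept alive (rooted via the pending list) and finalized in a later cycle.
    for (Node* n : marked)
        printf("%s: %s\n", n->name,
               n->state == State::REACHABLE_FROM_FINALIZER ? "finalize now" : "defer");
    // With this order, prints "B: defer" then "A: finalize now"; either way only A's
    // finalizer runs this cycle, and B's is deferred to a later GC pass.
    return 0;
}
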
......@@ -2650,13 +2650,9 @@ public:
}
void doSafePoint(AST_stmt* next_statement) override {
// If the sampling profiler is turned on (and eventually, destructors), we need frame-introspection
// support while in allowGLReadPreemption:
#if ENABLE_SAMPLING_PROFILER
// Unwind info is always needed in allowGLReadPreemption if it has any chance of
// running arbitrary code like finalizers.
emitter.createCall(UnwindInfo(next_statement, NULL), g.funcs.allowGLReadPreemption);
#else
emitter.getBuilder()->CreateCall(g.funcs.allowGLReadPreemption);
#endif
}
};
......
......@@ -23,6 +23,7 @@
#include "core/common.h"
#include "core/thread_utils.h"
#include "gc/collector.h"
namespace pyston {
class Box;
......@@ -108,6 +109,16 @@ extern "C" inline void allowGLReadPreemption() {
}
#endif
// We need to call the finalizers on dead objects at some point. This is a safe place to do so.
// This needs to be done before checking for other threads waiting on the GIL, since there could
// be only one thread doing a lot of work (if the check came first, that thread would return early
// and the finalizers would never run). Similarly for weakref callbacks.
//
// The conditional is an optimization - the function will do nothing if the lists are empty,
// but it's worth checking here first to avoid the overhead of making a function call.
if (!gc::pending_finalization_list.empty() || !gc::weakrefs_needing_callback_list.empty()) {
gc::callPendingDestructionLogic();
}
// Double-checked locking: first read with no ordering constraint:
if (!threads_waiting_on_gil.load(std::memory_order_relaxed))
return;
......
......@@ -615,6 +615,7 @@ extern "C" PyObject* PystonType_GenericAlloc(BoxedClass* cls, Py_ssize_t nitems)
\
/* Don't allocate classes through this -- we need to keep track of all class objects. */ \
assert(default_cls != type_cls); \
assert(!gc::hasOrderedFinalizer(default_cls)); \
\
/* note: we want to use size instead of tp_basicsize, since size is a compile-time constant */ \
void* mem = gc_alloc(size, gc::GCKind::PYTHON); \
......
......@@ -39,6 +39,11 @@ namespace gc {
FILE* trace_fp;
#endif
std::deque<Box*> pending_finalization_list;
std::deque<PyWeakReference*> weakrefs_needing_callback_list;
std::list<Box*> objects_with_ordered_finalizers;
static std::unordered_set<void*> roots;
static std::vector<std::pair<void*, void*>> potential_root_ranges;
......@@ -62,6 +67,12 @@ static int ncollections = 0;
static bool gc_enabled = true;
static bool should_not_reenter_gc = false;
enum TraceStackType {
MarkPhase,
FinalizationOrderingFindReachable,
FinalizationOrderingRemoveTemporaries,
};
class TraceStack {
private:
const int CHUNK_SIZE = 256;
......@@ -74,6 +85,8 @@ private:
void** start;
void** end;
TraceStackType visit_type;
void get_chunk() {
if (free_chunks.size()) {
start = free_chunks.back();
......@@ -99,10 +112,10 @@ private:
}
public:
TraceStack() { get_chunk(); }
TraceStack(const std::unordered_set<void*>& rhs) {
TraceStack(TraceStackType type) : visit_type(type) { get_chunk(); }
TraceStack(TraceStackType type, const std::unordered_set<void*>& root_handles) : visit_type(type) {
get_chunk();
for (void* p : rhs) {
for (void* p : root_handles) {
assert(!isMarked(GCAllocation::fromUserData(p)));
push(p);
}
......@@ -111,10 +124,56 @@ public:
void push(void* p) {
GC_TRACE_LOG("Pushing %p\n", p);
GCAllocation* al = GCAllocation::fromUserData(p);
if (isMarked(al))
return;
switch (visit_type) {
case TraceStackType::MarkPhase:
// Use this to print the directed edges of the GC graph traversal.
// i.e. print every a -> b where a is a pointer and b is something a references
#if 0
if (previous_pop) {
GCAllocation* source_allocation = GCAllocation::fromUserData(previous_pop);
if (source_allocation->kind_id == GCKind::PYTHON) {
printf("(%s) ", ((Box*)previous_pop)->cls->tp_name);
}
printf("%p > %p", previous_pop, al->user_data);
} else {
printf("source %p", al->user_data);
}
if (al->kind_id == GCKind::PYTHON) {
printf(" (%s)", ((Box*)al->user_data)->cls->tp_name);
}
printf("\n");
#endif
if (isMarked(al)) {
return;
} else {
setMark(al);
}
break;
// See PyPy's finalization ordering algorithm:
// http://pypy.readthedocs.org/en/latest/discussion/finalizer-order.html
case TraceStackType::FinalizationOrderingFindReachable:
if (orderingState(al) == FinalizationState::UNREACHABLE) {
setOrderingState(al, FinalizationState::TEMPORARY);
} else if (orderingState(al) == FinalizationState::REACHABLE_FROM_FINALIZER) {
setOrderingState(al, FinalizationState::ALIVE);
} else {
return;
}
break;
case TraceStackType::FinalizationOrderingRemoveTemporaries:
if (orderingState(al) == FinalizationState::TEMPORARY) {
setOrderingState(al, FinalizationState::REACHABLE_FROM_FINALIZER);
} else {
return;
}
break;
default:
assert(false);
}
*cur++ = p;
if (cur == end) {
......@@ -218,11 +277,34 @@ void registerPythonObject(Box* b) {
}
assert(b->cls);
if (hasOrderedFinalizer(b->cls)) {
objects_with_ordered_finalizers.push_back(b);
}
if (PyType_Check(b)) {
class_objects.insert((BoxedClass*)b);
}
}
void invalidateOrderedFinalizerList() {
static StatCounter sc_us("us_gc_invalidate_ordered_finalizer_list");
Timer _t("invalidateOrderedFinalizerList", /*min_usec=*/10000);
for (auto iter = objects_with_ordered_finalizers.begin(); iter != objects_with_ordered_finalizers.end();) {
Box* box = *iter;
GCAllocation* al = GCAllocation::fromUserData(box);
if (!hasOrderedFinalizer(box->cls) || hasFinalized(al)) {
// Cleanup.
iter = objects_with_ordered_finalizers.erase(iter);
} else {
++iter;
}
}
long us = _t.end();
sc_us.log(us);
}
GCRootHandle::GCRootHandle() {
getRootHandles()->insert(this);
}
......@@ -335,6 +417,89 @@ static void markRoots(GCVisitor& visitor) {
for (auto& e : potential_root_ranges) {
visitor.visitPotentialRange((void* const*)e.first, (void* const*)e.second);
}
GC_TRACE_LOG("Looking at pending finalization list\n");
for (auto box : pending_finalization_list) {
visitor.visit(box);
}
GC_TRACE_LOG("Looking at weakrefs needing callbacks list\n");
for (auto weakref : weakrefs_needing_callback_list) {
visitor.visit(weakref);
}
}
static void finalizationOrderingFindReachable(Box* obj) {
static StatCounter sc_marked_objs("gc_marked_object_count_finalizer_ordering");
static StatCounter sc_us("us_gc_mark_finalizer_ordering_1");
Timer _t("finalizationOrderingFindReachable", /*min_usec=*/10000);
TraceStack stack(TraceStackType::FinalizationOrderingFindReachable);
GCVisitor visitor(&stack);
stack.push(obj);
while (void* p = stack.pop()) {
sc_marked_objs.log();
visitByGCKind(p, visitor);
}
long us = _t.end();
sc_us.log(us);
}
static void finalizationOrderingRemoveTemporaries(Box* obj) {
static StatCounter sc_us("us_gc_mark_finalizer_ordering_2");
Timer _t("finalizationOrderingRemoveTemporaries", /*min_usec=*/10000);
TraceStack stack(TraceStackType::FinalizationOrderingRemoveTemporaries);
GCVisitor visitor(&stack);
stack.push(obj);
while (void* p = stack.pop()) {
GCAllocation* al = GCAllocation::fromUserData(p);
assert(orderingState(al) != FinalizationState::UNREACHABLE);
visitByGCKind(p, visitor);
}
long us = _t.end();
sc_us.log(us);
}
// Implementation of PyPy's finalization ordering algorithm:
// http://pypy.readthedocs.org/en/latest/discussion/finalizer-order.html
static void orderFinalizers() {
static StatCounter sc_us("us_gc_finalization_ordering");
Timer _t("finalizationOrdering", /*min_usec=*/10000);
std::vector<Box*> finalizer_marked;
for (Box* obj : objects_with_ordered_finalizers) {
GCAllocation* al = GCAllocation::fromUserData(obj);
// We are only interested in objects with finalizers that need to be garbage-collected.
if (orderingState(al) == FinalizationState::UNREACHABLE) {
assert(hasOrderedFinalizer(obj->cls));
finalizer_marked.push_back(obj);
finalizationOrderingFindReachable(obj);
finalizationOrderingRemoveTemporaries(obj);
}
}
for (Box* marked : finalizer_marked) {
GCAllocation* al = GCAllocation::fromUserData(marked);
FinalizationState state = orderingState(al);
assert(state == FinalizationState::REACHABLE_FROM_FINALIZER || state == FinalizationState::ALIVE);
if (state == FinalizationState::REACHABLE_FROM_FINALIZER) {
pending_finalization_list.push_back(marked);
}
}
long us = _t.end();
sc_us.log(us);
}
static void graphTraversalMarking(TraceStack& stack, GCVisitor& visitor) {
......@@ -362,6 +527,100 @@ static void graphTraversalMarking(TraceStack& stack, GCVisitor& visitor) {
sc_us.log(us);
}
static void callWeakrefCallback(PyWeakReference* head) {
if (head->wr_callback) {
runtimeCall(head->wr_callback, ArgPassSpec(1), reinterpret_cast<Box*>(head), NULL, NULL, NULL, NULL);
head->wr_callback = NULL;
}
}
static void callPendingFinalizers() {
static StatCounter sc_us_finalizer("us_gc_finalizercalls");
Timer _timer_finalizer("calling finalizers", /*min_usec=*/10000);
bool initially_empty = pending_finalization_list.empty();
// An object can be resurrected in the finalizer code. So when we call a finalizer, we
// mark the finalizer as having been called, but the object is only freed in another
// GC pass (objects whose finalizers have been called are treated the same as objects
// without finalizers).
while (!pending_finalization_list.empty()) {
Box* box = pending_finalization_list.front();
pending_finalization_list.pop_front();
assert(isValidGCObject(box));
if (isWeaklyReferenced(box)) {
// Call the weakref callbacks for this object (if any) first, followed by its finalizer.
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(box);
while (PyWeakReference* head = *list) {
assert(isValidGCObject(head));
if (head->wr_object != Py_None) {
assert(head->wr_object == box);
_PyWeakref_ClearRef(head);
callWeakrefCallback(head);
}
}
}
finalize(box);
}
if (!initially_empty) {
invalidateOrderedFinalizerList();
}
sc_us_finalizer.log(_timer_finalizer.end());
}
static void callPendingWeakrefCallbacks() {
static StatCounter sc_us_weakref("us_gc_weakrefcalls");
Timer _timer_weakref("calling weakref callbacks", /*min_usec=*/10000);
// Callbacks for weakly-referenced objects without finalizers.
while (!weakrefs_needing_callback_list.empty()) {
PyWeakReference* head = weakrefs_needing_callback_list.front();
weakrefs_needing_callback_list.pop_front();
callWeakrefCallback(head);
}
sc_us_weakref.log(_timer_weakref.end());
}
void callPendingDestructionLogic() {
static bool callingPending = false;
// Calling a finalizer is likely to trigger another call to allowGLReadPreemption, which
// would reenter callPendingDestructionLogic; without a guard we would recurse and end up
// calling only one finalizer per nested call to callPendingFinalizers/WeakrefCallbacks.
// The purpose of this boolean is to avoid that reentrancy.
if (!callingPending) {
callingPending = true;
callPendingFinalizers();
callPendingWeakrefCallbacks();
callingPending = false;
}
}
static void prepareWeakrefCallbacks(Box* box) {
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(box);
while (PyWeakReference* head = *list) {
assert(isValidGCObject(head));
if (head->wr_object != Py_None) {
assert(head->wr_object == box);
_PyWeakref_ClearRef(head);
if (head->wr_callback) {
weakrefs_needing_callback_list.push_back(head);
}
}
}
}
static void markPhase() {
static StatCounter sc_us("us_gc_mark_phase");
Timer _t("markPhase", /*min_usec=*/10000);
......@@ -375,7 +634,7 @@ static void markPhase() {
GC_TRACE_LOG("Starting collection %d\n", ncollections);
GC_TRACE_LOG("Looking at roots\n");
TraceStack stack(roots);
TraceStack stack(TraceStackType::MarkPhase, roots);
GCVisitor visitor(&stack);
markRoots(visitor);
......@@ -411,6 +670,17 @@ static void markPhase() {
class_objects.insert(cls->cls);
}
// Objects with finalizers cannot be freed in an arbitrary order. During the call to a finalizer
// of an object, the finalizer expects the object's references to still point to valid
// memory. So we root objects whose finalizers need to be called by placing them in a
// pending finalization list.
orderFinalizers();
#if TRACE_GC_MARKING
fclose(trace_fp);
trace_fp = NULL;
#endif
#ifndef NVALGRIND
VALGRIND_ENABLE_ERROR_REPORTING;
#endif
......@@ -501,25 +771,10 @@ void runCollection() {
// - first, find all of the weakref objects whose callbacks we need to call. we need to iterate
// over the garbage-and-corrupt-but-still-alive weakly_referenced list in order to find these objects,
// so the gc is not reentrant during this section. after this we discard that list.
// - then, call all the weakref callbacks we collected from the first pass.
// Use a StlCompatAllocator to keep the pending weakref objects alive in case we trigger a new collection.
// In theory we could push so much onto this list that we would cause a new collection to start:
std::list<PyWeakReference*, StlCompatAllocator<PyWeakReference*>> weak_references;
// - the callbacks are called later, along with the finalizers
for (auto o : weakly_referenced) {
assert(isValidGCObject(o));
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(o);
while (PyWeakReference* head = *list) {
assert(isValidGCObject(head));
if (head->wr_object != Py_None) {
assert(head->wr_object == o);
_PyWeakref_ClearRef(head);
if (head->wr_callback)
weak_references.push_back(head);
}
}
prepareWeakrefCallbacks(o);
global_heap.free(GCAllocation::fromUserData(o));
}
......@@ -530,17 +785,6 @@ void runCollection() {
should_not_reenter_gc = false; // end non-reentrant section
while (!weak_references.empty()) {
PyWeakReference* head = weak_references.front();
weak_references.pop_front();
if (head->wr_callback) {
runtimeCall(head->wr_callback, ArgPassSpec(1), reinterpret_cast<Box*>(head), NULL, NULL, NULL, NULL);
head->wr_callback = NULL;
}
}
global_heap.cleanupAfterCollection();
if (VERBOSITY("gc") >= 2)
......
......@@ -15,6 +15,8 @@
#ifndef PYSTON_GC_COLLECTOR_H
#define PYSTON_GC_COLLECTOR_H
#include <deque>
#include <list>
#include <vector>
#include "core/types.h"
......@@ -30,6 +32,9 @@ extern FILE* trace_fp;
#define GC_TRACE_LOG(...)
#endif
extern std::deque<Box*> pending_finalization_list;
extern std::deque<PyWeakReference*> weakrefs_needing_callback_list;
// Mark this gc-allocated object as being a root, even if there are no visible references to it.
// (Note: this marks the gc allocation itself, not the pointer that points to one. For that, use
// a GCRootHandle)
......@@ -58,6 +63,7 @@ public:
Box* operator->() { return value; }
};
void callPendingDestructionLogic();
void runCollection();
// Python programs are allowed to pause the GC. This is supposed to pause automatic GC,
......@@ -72,6 +78,7 @@ bool isValidGCMemory(void* p); // if p is a valid gc-allocated pointer (or a non
bool isValidGCObject(void* p); // whether p is valid gc memory and is set to have Python destructor semantics applied
bool isNonheapRoot(void* p);
void registerPythonObject(Box* b);
void invalidateOrderedFinalizerList();
// Debugging/validation helpers: if a GC should not happen in certain sections (ex during unwinding),
// use these functions to mark that. This is different from disableGC/enableGC, since it causes an
......
......@@ -91,6 +91,7 @@ inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free fr
auto cur = head;
while (cur) {
GCAllocation* al = cur->data;
clearOrderingState(al);
if (isMarked(al)) {
clearMark(al);
cur = cur->next;
......@@ -122,6 +123,39 @@ void _bytesAllocatedTripped() {
runCollection();
}
//////
/// Finalizers
bool hasOrderedFinalizer(BoxedClass* cls) {
if (cls->has_safe_tp_dealloc) {
assert(!cls->tp_del);
return false;
} else if (cls->hasNonDefaultTpDealloc()) {
return true;
} else {
// The default tp_dealloc calls tp_del if there is one.
return cls->tp_del != NULL;
}
}
void finalize(Box* b) {
GCAllocation* al = GCAllocation::fromUserData(b);
assert(!hasFinalized(al));
setFinalized(al);
b->cls->tp_dealloc(b);
}
__attribute__((always_inline)) bool isWeaklyReferenced(Box* b) {
if (PyType_SUPPORTS_WEAKREFS(b->cls)) {
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(b);
if (list && *list) {
return true;
}
}
return false;
}
Heap global_heap;
__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced) {
......@@ -145,17 +179,23 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
#endif
assert(b->cls);
if (PyType_SUPPORTS_WEAKREFS(b->cls)) {
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(b);
if (list && *list) {
if (isWeaklyReferenced(b)) {
assert(weakly_referenced && "attempting to free a weakly referenced object manually");
weakly_referenced->push_back(b);
return false;
}
}
ASSERT(!hasOrderedFinalizer(b->cls) || hasFinalized(al) || alloc_kind == GCKind::CONSERVATIVE_PYTHON, "%s",
getTypeName(b));
if (b->cls->tp_dealloc != dealloc_null && b->cls->has_safe_tp_dealloc) {
gc_safe_destructors.log();
GCAllocation* al = GCAllocation::fromUserData(b);
assert(!hasFinalized(al));
assert(!hasOrderedFinalizer(b->cls));
// Don't bother setting the finalized flag since the object is getting freed right now.
b->cls->tp_dealloc(b);
}
}
......@@ -452,6 +492,7 @@ SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weak
void* p = &b->atoms[atom_idx];
GCAllocation* al = reinterpret_cast<GCAllocation*>(p);
clearOrderingState(al);
if (isMarked(al)) {
clearMark(al);
} else {
......
......@@ -170,6 +170,10 @@ inline void clearOrderingState(GCAllocation* header) {
#undef FINALIZER_HAS_RUN_BIT
#undef ORDERING_BITS
bool hasOrderedFinalizer(BoxedClass* cls);
void finalize(Box* b);
bool isWeaklyReferenced(Box* b);
#define PAGE_SIZE 4096
template <uintptr_t arena_start, uintptr_t arena_size, uintptr_t initial_mapsize, uintptr_t increment> class Arena {
......
......@@ -20,6 +20,11 @@ namespace pyston {
static Box* gcCollect() {
gc::runCollection();
// I think it's natural that the user would expect the finalizers to get run here if we're forcing
// a GC pass. It should be safe to do, and it also makes testing easier.
gc::callPendingDestructionLogic();
return None;
}
......
......@@ -335,12 +335,59 @@ void dealloc_null(Box* box) {
assert(box->cls->tp_del == NULL);
}
// Analogous to CPython's implementation of subtype_dealloc, but having a GC
// saves us from complications involving "trashcan macros".
//
// This is the default destructor assigned to the tp_dealloc slot, the C/C++
// implementation of a Python object destructor. It may call the Python-implemented
// destructor __del__ stored in tp_del, if any.
//
// For now, we treat tp_del and tp_dealloc as one unit. In theory, we will only
// have both if we have a Python class with a __del__ method that subclasses from
// a C extension with a non-trivial tp_dealloc. We assert on that case for now
// until we run into actual code with this fairly rare situation.
//
// This case (having both tp_del and tp_dealloc) shouldn't be a problem if we
// remove the assert, except in the exceptional case where the __del__ method
// does object resurrection. The fix for this would be to spread out tp_del,
// tp_dealloc and sweeping over 3 GC passes. This would slightly impact the
// performance of Pyston as a whole for a case that may not exist in any
// production code, so we decide not to handle that edge case for now.
static void subtype_dealloc(Box* self) {
BoxedClass* type = self->cls;
if (type->tp_del) {
type->tp_del(self);
}
// Find nearest base with a different tp_dealloc.
BoxedClass* base = type;
while (base && base->tp_dealloc == subtype_dealloc) {
base = base->tp_base;
}
if (base && base->tp_dealloc && base->tp_dealloc != dealloc_null) {
RELEASE_ASSERT(!type->tp_del, "having both a tp_del and tp_dealloc not supported");
base->tp_dealloc(self);
}
}
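// To make the base-dealloc walk above concrete, a hypothetical sketch (CExtType and
// cext_dealloc are invented names used only for illustration, not types in this codebase):
//
// - A pure-Python class Foo(object): Foo gets subtype_dealloc, the walk skips every base
//   whose tp_dealloc is also subtype_dealloc, reaches object whose tp_dealloc is
//   dealloc_null, and calls nothing extra; the GC simply reclaims the memory.
// - A Python class Bar(CExtType), where CExtType is a C extension type with its own
//   cext_dealloc: the walk stops at CExtType and calls cext_dealloc. If Bar also defined
//   __del__ (so tp_del is set), the RELEASE_ASSERT above fires, since combining tp_del
//   with a non-trivial base tp_dealloc is not supported yet.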
// We don't need CPython's version of tp_free since we have GC.
// We still need to set tp_free to something and not a NULL pointer,
// because C extensions might still call tp_free from tp_dealloc.
void default_free(void*) {
}
bool BoxedClass::hasNonDefaultTpDealloc() {
// Find nearest base with a different tp_dealloc.
BoxedClass* base = this;
while (base && base->tp_dealloc == subtype_dealloc) {
base = base->tp_base;
}
return base && base->tp_dealloc && base->tp_dealloc != dealloc_null;
}
void BoxedClass::freeze() {
assert(!is_constant);
assert(tp_name); // otherwise debugging will be very hard
......@@ -396,7 +443,21 @@ BoxedClass::BoxedClass(BoxedClass* base, gcvisit_func gc_visit, int attrs_offset
assert(cls == type_cls || isSubclass(cls, type_cls));
}
assert(tp_dealloc == NULL);
if (is_user_defined) {
tp_dealloc = subtype_dealloc;
} else {
// We don't want default types like dict to have subtype_dealloc as a destructor.
// In CPython, they would have their own custom tp_dealloc, but we don't need
// those in Pyston thanks to the GC.
//
// What's the problem with having subtype_dealloc? In some cases like defdict_dealloc,
// the destructor calls another destructor thinking it's the parent, but ends up in the
// same destructor again (since child destructors are found first by subtype_dealloc),
// causing infinite recursion.
tp_dealloc = dealloc_null;
has_safe_tp_dealloc = true;
}
tp_free = default_free;
if (gc_visit == NULL) {
assert(base);
......@@ -5006,17 +5067,26 @@ Box* typeNew(Box* _cls, Box* arg1, Box* arg2, Box** _args) {
else
made->tp_alloc = PyType_GenericAlloc;
// On some occasions, Python-implemented classes inherit from C-implemented classes. For
// example, KeyedRef inherits from weakref, and needs to have its finalizer called
// whenever weakref's would be. So we inherit the property that a class has a safe tp_dealloc
// too. However, we must be careful to do that only when nothing else invalidates that
// property, such as the presence of a __del__ (tp_del) method.
assert(!made->has_safe_tp_dealloc);
if (!made->tp_del) {
for (auto b : *bases) {
BoxedClass* base = static_cast<BoxedClass*>(b);
if (!isSubclass(base->cls, type_cls))
continue;
if (base->tp_del) {
break;
}
if (base->has_safe_tp_dealloc) {
made->tp_dealloc = base->tp_dealloc;
made->has_safe_tp_dealloc = true;
break;
}
}
}
return made;
}
......
......@@ -3029,6 +3029,12 @@ static void setupDefaultClassGCParticipation() {
setTypeGCNone(&Match_Type);
setTypeGCNone(&Pattern_Type);
setTypeGCNone(&PyCallIter_Type);
// We just changed the has_safe_tp_dealloc field on a few classes, changing
// them from having an ordered finalizer to an unordered one.
// If some instances of those classes have already been allocated (e.g.
// preallocated exceptions), their entries in the ordered finalizer list need to be removed.
gc::invalidateOrderedFinalizerList();
}
bool TRACK_ALLOCATIONS = false;
......
......@@ -203,8 +203,10 @@ public:
// 3) Won't take up a lot of memory (requiring another GC run).
// 4) Won't resurrect itself.
//
// We specify that such destructors are safe for optimization purposes. We call the tp_dealloc
// as the object gets freed.
// We specify that such destructors are safe for optimization purposes (in our GC, we try to
// emulate the order of destructor calls and support resurrection by calling them in topological
// order through multiple GC passes, which is potentially quite expensive). We call the tp_dealloc
// as the object gets freed rather than putting it on a pending finalization list.
bool has_safe_tp_dealloc;
// Whether this class object is constant or not, ie whether or not class-level
......@@ -246,6 +248,9 @@ public:
return true;
}
// Checks if this class or one of its parents has a non-default tp_dealloc
bool hasNonDefaultTpDealloc();
void freeze();
protected:
......
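
Putting the destruction pieces together, the sweep-time decision implied by the diff roughly looks like the following. This is a condensed sketch, not the literal _doFree code: sweepDeadObject is an invented name, and the helpers (isWeaklyReferenced, hasOrderedFinalizer, hasFinalized, prepareWeakrefCallbacks, dealloc_null) are the declarations introduced in the diff above, so the snippet is meant to be read in that context rather than compiled standalone.

// Condensed sketch of what happens to a dead object at sweep time (illustrative only).
// During the mark phase, orderFinalizers() already rooted any dead object whose ordered
// finalizer still needs to run (via pending_finalization_list), so the sweep never sees
// an object with an unfinalized ordered finalizer.
static bool sweepDeadObject(Box* b, GCAllocation* al, std::vector<Box*>& weakly_referenced) {
    if (isWeaklyReferenced(b)) {
        // Handled after the sweep: runCollection() queues its callbacks via
        // prepareWeakrefCallbacks() and then frees it.
        weakly_referenced.push_back(b);
        return false;
    }
    assert(!hasOrderedFinalizer(b->cls) || hasFinalized(al));
    if (b->cls->has_safe_tp_dealloc && b->cls->tp_dealloc != dealloc_null) {
        // "Safe" destructor: no Python code, no resurrection, so run it inline.
        b->cls->tp_dealloc(b);
    }
    return true; // memory is reclaimed now
}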