Commit e0bd51eb authored by Kevin Modzelewski

Temporary fix for freeing-classes-before-instances bug

I was running into it, so I added a quick fix that should hopefully
get us through until Rudi gets his real fix in.

This commit just delays freeing the class objects until the end of the
sweep.  It doesn't deal with a number of corner cases, such as a class
and a metaclass getting freed in the same collection (they both get
delayed to the same point), or freeing a class that has weakrefs to it
(it just gets treated like a normal weakreferent).
parent f6b3c256
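
In outline, the workaround looks like the sketch below — a minimal, self-contained illustration with assumed names (the Box/BoxedClass stubs and plain delete stand in for the real Pyston heap API; the actual change threads a classes_to_free list through Heap::freeUnmarked and _doFree). Instances consult their class's GC metadata while they are being freed, so class objects must stay alive until all other garbage has been released:

#include <vector>

struct Box {
    virtual ~Box() {}
    bool is_class = false;
};
struct BoxedClass : Box {
    BoxedClass() { is_class = true; }
};

// Sketch of the sweep: defer classes into a side list, drain it last.
void sweepSketch(std::vector<Box*>& unmarked) {
    std::vector<BoxedClass*> classes_to_free;

    for (Box* b : unmarked) {
        if (b->is_class) {
            // Defer: an instance freed later in this same sweep may still
            // need to look at this class.
            classes_to_free.push_back(static_cast<BoxedClass*>(b));
        } else {
            delete b; // stands in for the normal free path
        }
    }

    // All non-class garbage is gone; releasing the classes is now safe
    // (modulo the class/metaclass ordering caveat above).
    for (BoxedClass* cls : classes_to_free)
        delete cls;
}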
@@ -375,10 +375,10 @@ void markPhase() {
 #endif
 }
 
-static void sweepPhase(std::vector<Box*>& weakly_referenced) {
+static void sweepPhase(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
     // we need to use the allocator here because these objects are referenced only here, and calling the weakref
     // callbacks could start another gc
-    global_heap.freeUnmarked(weakly_referenced);
+    global_heap.freeUnmarked(weakly_referenced, classes_to_free);
 }
 
 static bool gc_enabled = true;
@@ -433,7 +433,17 @@ void runCollection() {
     // since the deallocation of other objects (namely, the weakref objects themselves) can affect
     // those lists, and we want to see the final versions.
     std::vector<Box*> weakly_referenced;
-    sweepPhase(weakly_referenced);
+
+    // Temporary solution to the "we can't free classes before their instances" problem: the sweep phase
+    // will avoid freeing classes, and will instead put them into this list for us to free at the end.
+    // XXX there are still corner cases with it:
+    // - there is no ordering enforced between different class objects, i.e. if you free a class and a
+    //   metaclass in the same collection we have the same issue
+    // - if there are weakrefs to the class, it gets freed slightly earlier
+    // These could both be fixed, but I think the full fix will come with Rudi's larger finalization changes.
+    std::vector<BoxedClass*> classes_to_free;
+    sweepPhase(weakly_referenced, classes_to_free);
 
     // Handle weakrefs in two passes:
     // - first, find all of the weakref objects whose callbacks we need to call.  we need to iterate
@@ -458,7 +468,11 @@ void runCollection() {
                 weak_references.push_back(head);
             }
         }
-        global_heap.free(GCAllocation::fromUserData(o));
+        global_heap._setFree(GCAllocation::fromUserData(o));
     }
+
+    for (auto b : classes_to_free) {
+        global_heap._setFree(GCAllocation::fromUserData(b));
+    }
 
     should_not_reenter_gc = false; // end non-reentrant section
......
@@ -43,7 +43,7 @@ template <> void return_temporary_buffer<pyston::Box*>(pyston::Box** p) {
 namespace pyston {
 namespace gc {
 
-bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced);
+bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced, std::vector<BoxedClass*>* classes_to_free);
 
 // lots of linked lists around here, so let's just use template functions for operations on them.
 template <class ListT> inline void nullNextPrev(ListT* node) {
@@ -86,7 +86,8 @@ template <class ListT, typename Func> inline void forEach(ListT* list, Func func
 }
 
 template <class ListT, typename Free>
-inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free free_func) {
+inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free,
+                      Free free_func) {
     auto cur = head;
     while (cur) {
         GCAllocation* al = cur->data;
@@ -94,7 +95,7 @@ inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free fr
             clearMark(al);
             cur = cur->next;
         } else {
-            if (_doFree(al, &weakly_referenced)) {
+            if (_doFree(al, &weakly_referenced, &classes_to_free)) {
                 removeFromLL(cur);
 
                 auto to_free = cur;
@@ -122,7 +123,8 @@ void _bytesAllocatedTripped() {
 
 Heap global_heap;
 
-__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced) {
+__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced,
+                                            std::vector<BoxedClass*>* classes_to_free) {
 #ifndef NVALGRIND
     VALGRIND_DISABLE_ERROR_REPORTING;
 #endif
@@ -149,6 +151,13 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
         }
     }
 
+    // Note: do this check after the weakrefs check.
+    if (PyType_Check(b)) {
+        assert(classes_to_free);
+        classes_to_free->push_back(static_cast<BoxedClass*>(b));
+        return false;
+    }
+
     // XXX: we are currently ignoring destructors (tp_dealloc) for extension objects, since we have
     // historically done that (whoops) and there are too many to be worth changing for now as long
     // as we can get real destructor support soon.
@@ -161,7 +170,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
 }
 
 void Heap::destructContents(GCAllocation* al) {
-    _doFree(al, NULL);
+    _doFree(al, NULL, NULL);
 }
 
 struct HeapStatistics {
@@ -369,8 +378,8 @@ GCAllocation* SmallArena::allocationFrom(void* ptr) {
     return reinterpret_cast<GCAllocation*>(&b->atoms[atom_idx]);
 }
 
-void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
-    thread_caches.forEachValue([this, &weakly_referenced](ThreadBlockCache* cache) {
+void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+    thread_caches.forEachValue([this, &weakly_referenced, &classes_to_free](ThreadBlockCache* cache) {
         for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
             Block* h = cache->cache_free_heads[bidx];
             // Try to limit the amount of unused memory a thread can hold onto;
@@ -390,8 +399,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
                 insertIntoLL(&heads[bidx], h);
             }
 
-            Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced);
-            _freeChain(&cache->cache_full_heads[bidx], weakly_referenced);
+            Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced, classes_to_free);
+            _freeChain(&cache->cache_full_heads[bidx], weakly_referenced, classes_to_free);
 
             while (Block* b = cache->cache_full_heads[bidx]) {
                 removeFromLLAndNull(b);
@@ -401,8 +410,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
     });
 
     for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
-        Block** chain_end = _freeChain(&heads[bidx], weakly_referenced);
-        _freeChain(&full_heads[bidx], weakly_referenced);
+        Block** chain_end = _freeChain(&heads[bidx], weakly_referenced, classes_to_free);
+        _freeChain(&full_heads[bidx], weakly_referenced, classes_to_free);
 
         while (Block* b = full_heads[bidx]) {
            removeFromLLAndNull(b);
@@ -429,7 +438,8 @@ void SmallArena::getStatistics(HeapStatistics* stats) {
 }
 
-SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced) {
+SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced,
+                                           std::vector<BoxedClass*>& classes_to_free) {
     while (Block* b = *head) {
         int num_objects = b->numObjects();
         int first_obj = b->minObjIndex();
@@ -453,7 +463,7 @@ SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weak
             if (isMarked(al)) {
                 clearMark(al);
             } else {
-                if (_doFree(al, &weakly_referenced)) {
+                if (_doFree(al, &weakly_referenced, &classes_to_free)) {
                     b->isfree.set(atom_idx);
 #ifndef NDEBUG
                     memset(al->user_data, 0xbb, b->size - sizeof(GCAllocation));
@@ -664,8 +674,8 @@ GCAllocation* LargeArena::allocationFrom(void* ptr) {
     return NULL;
 }
 
-void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
-    sweepList(head, weakly_referenced, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
+void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+    sweepList(head, weakly_referenced, classes_to_free, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
 }
 
 void LargeArena::getStatistics(HeapStatistics* stats) {
@@ -857,8 +867,8 @@ GCAllocation* HugeArena::allocationFrom(void* ptr) {
     return NULL;
 }
 
-void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
-    sweepList(head, weakly_referenced, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
+void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+    sweepList(head, weakly_referenced, classes_to_free, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
 }
 
 void HugeArena::getStatistics(HeapStatistics* stats) {
......
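
The ordering of the checks inside _doFree matters: the weakref check runs first, so a weakly-referenced class takes the weakref path and is freed slightly earlier, which is the second corner case called out above. A sketch of that decision order (the boolean fields here are illustrative stand-ins for the real checks such as PyType_Check):

#include <vector>

struct Box {
    bool has_weakrefs = false; // stand-in for the weakref-list check
    bool is_class = false;     // stand-in for PyType_Check
};
typedef Box BoxedClass;

// Returns true iff the caller should release the memory immediately,
// mirroring the contract of _doFree in this commit.
bool doFreeSketch(Box* b, std::vector<Box*>* weakly_referenced,
                  std::vector<BoxedClass*>* classes_to_free) {
    // 1. Weakrefs first: a weakly-referenced object (even a class) is
    //    queued for callback processing and freed on that path.
    if (b->has_weakrefs && weakly_referenced) {
        weakly_referenced->push_back(b);
        return false;
    }

    // 2. Classes second ("do this check after the weakrefs check"):
    //    plain classes are deferred to the end of the sweep.
    if (b->is_class) {
        classes_to_free->push_back(b);
        return false;
    }

    // 3. Everything else is freed right away.
    return true;
}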
@@ -223,7 +223,7 @@ public:
     void free(GCAllocation* al);
 
     GCAllocation* allocationFrom(void* ptr);
-    void freeUnmarked(std::vector<Box*>& weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
 
     void getStatistics(HeapStatistics* stats);
@@ -355,7 +355,7 @@ private:
     Block* _allocBlock(uint64_t size, Block** prev);
    GCAllocation* _allocFromBlock(Block* b);
     Block* _claimBlock(size_t rounded_size, Block** free_head);
-    Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced);
+    Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
     void _getChainStatistics(HeapStatistics* stats, Block** head);
 
     GCAllocation* __attribute__((__malloc__)) _alloc(size_t bytes, int bucket_idx);
@@ -428,7 +428,7 @@ public:
     void free(GCAllocation* alloc);
 
     GCAllocation* allocationFrom(void* ptr);
-    void freeUnmarked(std::vector<Box*>& weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
 
     void getStatistics(HeapStatistics* stats);
 };
@@ -446,7 +446,7 @@ public:
     void free(GCAllocation* alloc);
 
     GCAllocation* allocationFrom(void* ptr);
-    void freeUnmarked(std::vector<Box*>& weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
 
     void getStatistics(HeapStatistics* stats);
@@ -529,18 +529,7 @@ public:
     void free(GCAllocation* alloc) {
         destructContents(alloc);
 
-        if (large_arena.contains(alloc)) {
-            large_arena.free(alloc);
-            return;
-        }
-
-        if (huge_arena.contains(alloc)) {
-            huge_arena.free(alloc);
-            return;
-        }
-
-        assert(small_arena.contains(alloc));
-        small_arena.free(alloc);
+        _setFree(alloc);
     }
 
     // not thread safe:
@@ -557,15 +546,34 @@
     }
 
     // not thread safe:
-    void freeUnmarked(std::vector<Box*>& weakly_referenced) {
-        small_arena.freeUnmarked(weakly_referenced);
-        large_arena.freeUnmarked(weakly_referenced);
-        huge_arena.freeUnmarked(weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+        small_arena.freeUnmarked(weakly_referenced, classes_to_free);
+        large_arena.freeUnmarked(weakly_referenced, classes_to_free);
+        huge_arena.freeUnmarked(weakly_referenced, classes_to_free);
     }
 
     void dumpHeapStatistics(int level);
 
 private:
+    // Internal function that just marks the allocation as being freed, without running any
+    // Python semantics on it.
+    void _setFree(GCAllocation* alloc) {
+        if (large_arena.contains(alloc)) {
+            large_arena.free(alloc);
+            return;
+        }
+
+        if (huge_arena.contains(alloc)) {
+            huge_arena.free(alloc);
+            return;
+        }
+
+        assert(small_arena.contains(alloc));
+        small_arena.free(alloc);
+    }
+
     friend void markPhase();
     friend void runCollection();
 };
 
 extern Heap global_heap;
......
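
Net effect of the header change: Heap::free() is factored into teardown plus release. The public free() still runs destructContents() (the _doFree path) and then hands the memory back, while the new private _setFree() does only the arena dispatch; runCollection() calls _setFree() directly for weakreferents and deferred classes, because their _doFree processing already happened during the sweep. A stub sketch of the split (the Arena type here is a placeholder, not the real SmallArena/LargeArena/HugeArena):

#include <cassert>

struct GCAllocation {};

struct Arena { // placeholder for SmallArena / LargeArena / HugeArena
    explicit Arena(bool owns = false) : owns(owns) {}
    bool owns;
    bool contains(GCAllocation*) const { return owns; }
    void free(GCAllocation*) {}
};

class HeapSketch {
public:
    void destructContents(GCAllocation*) { /* would run _doFree */ }

    // Full free: Python-level teardown, then memory release.
    void free(GCAllocation* alloc) {
        destructContents(alloc);
        _setFree(alloc);
    }

private:
    // Memory-only release, no Python semantics.  Deferred classes and
    // weakreferents take this path from runCollection, since _doFree
    // already saw them during the sweep.
    void _setFree(GCAllocation* alloc) {
        if (large_arena.contains(alloc)) {
            large_arena.free(alloc);
            return;
        }
        if (huge_arena.contains(alloc)) {
            huge_arena.free(alloc);
            return;
        }
        assert(small_arena.contains(alloc));
        small_arena.free(alloc);
    }

    Arena small_arena{ true }, large_arena, huge_arena;

    friend void runCollectionSketch(); // would call _setFree directly
};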
# Since gc-related metadata is (currently) placed on classes,
# it's tricky for us if a class object becomes garbage in the same
# collection as some of its instances.  If we try to collect the class
# first, we will no longer have the metadata on how to collect the instances.

def f():
    class C(object):
        pass

    for i in xrange(1000):
        pass

for j in xrange(1000):
    f()

import gc
gc.collect()