Commit 7b84d99d authored by Kevin Modzelewski

Merge pull request #889 from rudi-c/movingmerge

Optionally move objects around in memory to prepare Pyston for a moving collector.
parents db991b38 8a510e30
......@@ -28,7 +28,7 @@ typedef int (*update_callback)(PyTypeObject*, void*);
PyObject* tp_new_wrapper(PyTypeObject* self, BoxedTuple* args, Box* kwds) noexcept;
extern "C" void conservativeGCHandler(GCVisitor* v, Box* b) noexcept {
v->visitPotentialRange((void* const*)b, (void* const*)((char*)b + b->cls->tp_basicsize));
v->visitPotentialRange((void**)b, (void**)((char*)b + b->cls->tp_basicsize));
}
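
The handler above scans the whole object, from `b` to `b + tp_basicsize`, as a range of *potential* pointers. As a rough standalone illustration of what potential-range scanning means (the heap bounds, names, and output below are made up, not Pyston's arenas):

```cpp
#include <cstdint>
#include <cstdio>

// Made-up heap address range for the sketch.
static const uintptr_t HEAP_LO = 0x100000;
static const uintptr_t HEAP_HI = 0x200000;

// Walk a word-aligned range and report any word whose value falls inside the
// heap range; a conservative GC would treat those words as possible pointers
// and keep their targets alive.
void visitPotentialRange(void** start, void** end) {
    for (void** p = start; p < end; p++) {
        uintptr_t v = (uintptr_t)*p;
        if (v >= HEAP_LO && v < HEAP_HI)
            printf("slot %p may point into the heap (0x%lx)\n", (void*)p, (unsigned long)v);
    }
}

int main() {
    void* words[4] = { (void*)0x42,       // small integer: ignored
                       (void*)0x100040,   // looks like a heap pointer
                       nullptr,           // NULL: ignored
                       (void*)0x1fff00 }; // looks like a heap pointer
    visitPotentialRange(words, words + 4);
    return 0;
}
```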
extern "C" void conservativeAndBasesGCHandler(GCVisitor* v, Box* b) noexcept {
......
......@@ -68,8 +68,8 @@ public:
BoxedCApiFunction* o = static_cast<BoxedCApiFunction*>(_o);
Box::gcHandler(v, o);
v->visit(o->passthrough);
v->visit(o->module);
v->visit(&o->passthrough);
v->visit(&o->module);
}
};
static_assert(sizeof(BoxedCApiFunction) == sizeof(PyCFunctionObject), "");
......
......@@ -580,9 +580,12 @@ public:
PythonUnwindSession* o = static_cast<PythonUnwindSession*>(_o);
v->visitIf(o->exc_info.type);
v->visitIf(o->exc_info.value);
v->visitIf(o->exc_info.traceback);
if (o->exc_info.type)
v->visit(&o->exc_info.type);
if (o->exc_info.value)
v->visit(&o->exc_info.value);
if (o->exc_info.traceback)
v->visit(&o->exc_info.traceback);
}
};
static __thread PythonUnwindSession* cur_unwind;
......
......@@ -125,16 +125,16 @@ public:
void accept(gc::GCVisitor* v) {
auto pub_state = public_thread_state;
if (pub_state->curexc_type)
v->visit(pub_state->curexc_type);
v->visit(&pub_state->curexc_type);
if (pub_state->curexc_value)
v->visit(pub_state->curexc_value);
v->visit(&pub_state->curexc_value);
if (pub_state->curexc_traceback)
v->visit(pub_state->curexc_traceback);
v->visit(&pub_state->curexc_traceback);
if (pub_state->dict)
v->visit(pub_state->dict);
v->visit(&pub_state->dict);
for (auto& stack_info : previous_stacks) {
v->visit(stack_info.next_generator);
v->visit(&stack_info.next_generator);
#if STACK_GROWS_DOWN
v->visitPotentialRange((void**)stack_info.stack_limit, (void**)stack_info.stack_start);
#else
......
......@@ -30,6 +30,7 @@
#include "runtime/hiddenclass.h"
#include "runtime/objmodel.h"
#include "runtime/types.h"
#include "runtime/util.h"
#ifndef NVALGRIND
#include "valgrind.h"
......@@ -62,19 +63,17 @@ static std::unordered_set<GCRootHandle*>* getRootHandles() {
return &root_handles;
}
static int ncollections = 0;
int ncollections = 0;
static bool gc_enabled = true;
static bool should_not_reenter_gc = false;
enum TraceStackType {
MarkPhase,
FinalizationOrderingFindReachable,
FinalizationOrderingRemoveTemporaries,
};
class TraceStack {
private:
// This is basically a stack. However, for optimization purposes,
// blocks of memory are allocated at once when things need to be pushed.
//
// For performance, this should not have virtual methods.
class ChunkedStack {
protected:
const int CHUNK_SIZE = 256;
const int MAX_FREE_CHUNKS = 50;
......@@ -85,8 +84,6 @@ private:
void** start;
void** end;
TraceStackType visit_type;
void get_chunk() {
if (free_chunks.size()) {
start = free_chunks.back();
......@@ -98,12 +95,14 @@ private:
cur = start;
end = start + CHUNK_SIZE;
}
void release_chunk(void** chunk) {
if (free_chunks.size() == MAX_FREE_CHUNKS)
free(chunk);
else
free_chunks.push_back(chunk);
}
void pop_chunk() {
start = chunks.back();
chunks.pop_back();
......@@ -111,29 +110,79 @@ private:
cur = end;
}
public:
TraceStack(TraceStackType type) : visit_type(type) { get_chunk(); }
TraceStack(TraceStackType type, const std::unordered_set<void*>& roots) : visit_type(type) {
get_chunk();
for (void* p : roots) {
ASSERT(!isMarked(GCAllocation::fromUserData(p)), "");
push(p);
void* pop_chunk_and_item() {
release_chunk(start);
if (chunks.size()) {
pop_chunk();
assert(cur == end);
return *--cur; // no need for any bounds checks here since we're guaranteed we're CHUNK_SIZE from the start
} else {
// We emptied the stack, but we should prepare a new chunk in case another item
// gets added onto the stack.
get_chunk();
return NULL;
}
}
~TraceStack() {
RELEASE_ASSERT(end - cur == CHUNK_SIZE, "destroying non-empty TraceStack");
// We always have a block available in case we want to push items onto the TraceStack,
public:
ChunkedStack() { get_chunk(); }
~ChunkedStack() {
RELEASE_ASSERT(end - cur == CHUNK_SIZE, "destroying non-empty ChunkedStack");
// We always have a block available in case we want to push items onto the TraversalWorklist,
// but that chunk needs to be released after use to avoid a memory leak.
release_chunk(start);
}
void* pop() {
if (cur > start)
return *--cur;
return pop_chunk_and_item();
}
void push(void* p) {
*cur++ = p;
if (cur == end) {
chunks.push_back(start);
get_chunk();
}
}
};
std::vector<void**> ChunkedStack::free_chunks;
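
ChunkedStack stores worklist entries in fixed-size malloc'd chunks, so a push only allocates when the current chunk fills up and exhausted chunks can be handed back. A minimal standalone sketch of the same idea, simplified to omit the free-chunk cache (names are illustrative, not Pyston's class):

```cpp
#include <cassert>
#include <cstdlib>
#include <vector>

class SimpleChunkedStack {
    enum { CHUNK_SIZE = 256 };
    std::vector<void**> chunks;   // full chunks parked below the current one
    void** start = nullptr;       // current chunk
    void** cur = nullptr;         // next free slot in the current chunk
    void** end = nullptr;         // one past the last slot of the current chunk

    void newChunk() {
        start = (void**)malloc(sizeof(void*) * CHUNK_SIZE);
        cur = start;
        end = start + CHUNK_SIZE;
    }

public:
    SimpleChunkedStack() { newChunk(); }
    ~SimpleChunkedStack() {
        free(start);
        for (void** c : chunks)
            free(c);
    }

    void push(void* p) {
        *cur++ = p;
        if (cur == end) {            // current chunk is full: park it, grab a fresh one
            chunks.push_back(start);
            newChunk();
        }
    }

    void* pop() {
        if (cur > start)
            return *--cur;
        if (chunks.empty())
            return nullptr;          // stack is empty
        free(start);                 // drop the exhausted chunk (the real class recycles it)
        start = chunks.back();
        chunks.pop_back();
        cur = end = start + CHUNK_SIZE;
        return *--cur;
    }
};

int main() {
    SimpleChunkedStack s;
    int x = 0, y = 0;
    for (int i = 0; i < 1000; i++)
        s.push(i % 2 ? (void*)&x : (void*)&y);
    int count = 0;
    while (s.pop())
        count++;
    assert(count == 1000);
    return 0;
}
```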
enum TraversalType {
MarkPhase,
FinalizationOrderingFindReachable,
FinalizationOrderingRemoveTemporaries,
MapReferencesPhase,
};
class Worklist {
protected:
ChunkedStack stack;
public:
void* next() { return stack.pop(); }
};
class TraversalWorklist : public Worklist {
TraversalType visit_type;
public:
TraversalWorklist(TraversalType type) : visit_type(type) {}
TraversalWorklist(TraversalType type, const std::unordered_set<void*>& roots) : TraversalWorklist(type) {
for (void* p : roots) {
ASSERT(!isMarked(GCAllocation::fromUserData(p)), "");
addWork(p);
}
}
void addWork(void* p) {
GC_TRACE_LOG("Pushing %p\n", p);
GCAllocation* al = GCAllocation::fromUserData(p);
switch (visit_type) {
case TraceStackType::MarkPhase:
case TraversalType::MarkPhase:
// Use this to print the directed edges of the GC graph traversal.
// i.e. print every a -> b where a is a pointer and b is something a references
#if 0
......@@ -162,7 +211,7 @@ public:
break;
// See PyPy's finalization ordering algorithm:
// http://pypy.readthedocs.org/en/latest/discussion/finalizer-order.html
case TraceStackType::FinalizationOrderingFindReachable:
case TraversalType::FinalizationOrderingFindReachable:
if (orderingState(al) == FinalizationState::UNREACHABLE) {
setOrderingState(al, FinalizationState::TEMPORARY);
} else if (orderingState(al) == FinalizationState::REACHABLE_FROM_FINALIZER) {
......@@ -171,7 +220,7 @@ public:
return;
}
break;
case TraceStackType::FinalizationOrderingRemoveTemporaries:
case TraversalType::FinalizationOrderingRemoveTemporaries:
if (orderingState(al) == FinalizationState::TEMPORARY) {
setOrderingState(al, FinalizationState::REACHABLE_FROM_FINALIZER);
} else {
......@@ -182,36 +231,64 @@ public:
assert(false);
}
*cur++ = p;
if (cur == end) {
chunks.push_back(start);
get_chunk();
}
stack.push(p);
}
};
void* pop_chunk_and_item() {
release_chunk(start);
if (chunks.size()) {
pop_chunk();
assert(cur == end);
return *--cur; // no need for any bounds checks here since we're guaranteed we're CHUNK_SIZE from the start
} else {
// We emptied the stack, but we should prepare a new chunk in case another item
// gets added onto the stack.
get_chunk();
return NULL;
#if MOVING_GC
class ReferenceMapWorklist : public Worklist {
ReferenceMap* refmap;
public:
ReferenceMapWorklist(ReferenceMap* refmap) : refmap(refmap) {}
ReferenceMapWorklist(ReferenceMap* refmap, const std::unordered_set<void*>& roots) : refmap(refmap) {
for (void* p : roots) {
addWork(GCAllocation::fromUserData(p), NULL);
}
}
void addWork(GCAllocation* al, GCAllocation* source) {
assert(refmap);
void* pop() {
if (cur > start)
return *--cur;
auto it = refmap->references.find(al);
if (it == refmap->references.end()) {
refmap->references.emplace(al, std::vector<GCAllocation*>());
auto& vec = refmap->references[al];
return pop_chunk_and_item();
if (source) {
// We found that there exists a pointer from `source` to `al`
vec.push_back(source);
} else {
// No source => this is a root. We should pin roots.
refmap->pinned.emplace(al);
}
// Pin these types of objects - they are likely to be untracked at
// this time.
if (al->kind_id == GCKind::RUNTIME) {
pin(al);
} else if (al->kind_id == GCKind::PYTHON) {
Box* b = (Box*)al->user_data;
if (b->cls == type_cls || b->cls == module_cls) {
pin(al);
}
}
stack.push(al->user_data);
} else {
if (source) {
// We found that there exists a pointer from `source` to `al`
it->second.push_back(source);
} else {
// No source => this is a root. We should pin roots.
pin(al);
}
}
}
void pin(GCAllocation* al) { refmap->pinned.emplace(al); }
};
std::vector<void**> TraceStack::free_chunks;
#endif
void registerPermanentRoot(void* obj, bool allow_duplicates) {
assert(global_heap.getAllocationFromInteriorPointer(obj));
......@@ -313,6 +390,41 @@ void invalidateOrderedFinalizerList() {
sc_us.log(us);
}
__attribute__((always_inline)) void visitByGCKind(void* p, GCVisitor& visitor) {
assert(((intptr_t)p) % 8 == 0);
GCAllocation* al = GCAllocation::fromUserData(p);
visitor.setSource(al);
GCKind kind_id = al->kind_id;
if (kind_id == GCKind::UNTRACKED) {
// Nothing to do here.
} else if (kind_id == GCKind::CONSERVATIVE) {
uint32_t bytes = al->kind_data;
visitor.visitPotentialRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PRECISE) {
uint32_t bytes = al->kind_data;
visitor.visitRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PYTHON) {
Box* b = reinterpret_cast<Box*>(p);
BoxedClass* cls = b->cls;
if (cls) {
// The cls can be NULL since we use 'new' to construct them.
// An arbitrary amount of stuff can happen between the 'new' and
// the call to the constructor (ie the args get evaluated), which
// can trigger a collection.
ASSERT(cls->gc_visit, "%s", getTypeName(b));
cls->gc_visit(&visitor, b);
}
} else if (kind_id == GCKind::RUNTIME) {
GCAllocatedRuntime* runtime_obj = reinterpret_cast<GCAllocatedRuntime*>(p);
runtime_obj->gc_visit(&visitor);
} else {
RELEASE_ASSERT(0, "Unhandled kind: %d", (int)kind_id);
}
}
GCRootHandle::GCRootHandle() {
getRootHandles()->insert(this);
}
......@@ -320,17 +432,18 @@ GCRootHandle::~GCRootHandle() {
getRootHandles()->erase(this);
}
void GCVisitor::visit(void* p) {
void GCVisitor::_visit(void** ptr_address) {
void* p = *ptr_address;
if ((uintptr_t)p < SMALL_ARENA_START || (uintptr_t)p >= HUGE_ARENA_START + ARENA_SIZE) {
ASSERT(!p || isNonheapRoot(p), "%p", p);
return;
}
ASSERT(global_heap.getAllocationFromInteriorPointer(p)->user_data == p, "%p", p);
stack->push(p);
worklist->addWork(p);
}
void GCVisitor::visitRange(void* const* start, void* const* end) {
void GCVisitor::_visitRange(void** start, void** end) {
ASSERT((const char*)end - (const char*)start <= 1000000000, "Asked to scan %.1fGB -- a bug?",
((const char*)end - (const char*)start) * 1.0 / (1 << 30));
......@@ -338,7 +451,7 @@ void GCVisitor::visitRange(void* const* start, void* const* end) {
assert((uintptr_t)end % sizeof(void*) == 0);
while (start < end) {
visit(*start);
visit(start);
start++;
}
}
......@@ -346,11 +459,11 @@ void GCVisitor::visitRange(void* const* start, void* const* end) {
void GCVisitor::visitPotential(void* p) {
GCAllocation* a = global_heap.getAllocationFromInteriorPointer(p);
if (a) {
visit(a->user_data);
worklist->addWork(a->user_data);
}
}
void GCVisitor::visitPotentialRange(void* const* start, void* const* end) {
void GCVisitor::visitPotentialRange(void** start, void** end) {
ASSERT((const char*)end - (const char*)start <= 1000000000, "Asked to scan %.1fGB -- a bug?",
((const char*)end - (const char*)start) * 1.0 / (1 << 30));
......@@ -374,62 +487,56 @@ void GCVisitor::visitPotentialRange(void* const* start, void* const* end) {
}
}
static __attribute__((always_inline)) void visitByGCKind(void* p, GCVisitor& visitor) {
assert(((intptr_t)p) % 8 == 0);
#if MOVING_GC
void GCVisitorPinning::_visit(void** ptr_address) {
void* p = *ptr_address;
if ((uintptr_t)p < SMALL_ARENA_START || (uintptr_t)p >= HUGE_ARENA_START + ARENA_SIZE) {
ASSERT(!p || isNonheapRoot(p), "%p", p);
return;
}
GCAllocation* al = GCAllocation::fromUserData(p);
GCAllocation* al = global_heap.getAllocationFromInteriorPointer(p);
ASSERT(al->user_data == p, "%p", p);
worklist->addWork(al, source);
}
GCKind kind_id = al->kind_id;
if (kind_id == GCKind::UNTRACKED) {
// Nothing to do here.
} else if (kind_id == GCKind::CONSERVATIVE) {
uint32_t bytes = al->kind_data;
visitor.visitPotentialRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PRECISE) {
uint32_t bytes = al->kind_data;
visitor.visitRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PYTHON) {
Box* b = reinterpret_cast<Box*>(p);
BoxedClass* cls = b->cls;
void GCVisitorPinning::visitPotential(void* p) {
GCAllocation* a = global_heap.getAllocationFromInteriorPointer(p);
if (a) {
worklist->pin(a);
worklist->addWork(a, source);
}
}
if (cls) {
// The cls can be NULL since we use 'new' to construct them.
// An arbitrary amount of stuff can happen between the 'new' and
// the call to the constructor (ie the args get evaluated), which
// can trigger a collection.
ASSERT(cls->gc_visit, "%s", getTypeName(b));
cls->gc_visit(&visitor, b);
}
} else if (kind_id == GCKind::RUNTIME) {
GCAllocatedRuntime* runtime_obj = reinterpret_cast<GCAllocatedRuntime*>(p);
runtime_obj->gc_visit(&visitor);
} else {
RELEASE_ASSERT(0, "Unhandled kind: %d", (int)kind_id);
void GCVisitorReplacing::_visit(void** ptr_address) {
if (*ptr_address == old_value) {
*ptr_address = new_value;
}
}
#endif
static void markRoots(GCVisitor& visitor) {
static void visitRoots(GCVisitor& visitor) {
GC_TRACE_LOG("Looking at the stack\n");
threading::visitAllStacks(&visitor);
GC_TRACE_LOG("Looking at root handles\n");
for (auto h : *getRootHandles()) {
visitor.visit(h->value);
visitor.visit(&h->value);
}
GC_TRACE_LOG("Looking at potential root ranges\n");
for (auto& e : potential_root_ranges) {
visitor.visitPotentialRange((void* const*)e.first, (void* const*)e.second);
visitor.visitPotentialRange((void**)e.first, (void**)e.second);
}
GC_TRACE_LOG("Looking at pending finalization list\n");
for (auto box : pending_finalization_list) {
visitor.visit(box);
visitor.visit(&box);
}
GC_TRACE_LOG("Looking at weakrefs needing callbacks list\n");
for (auto weakref : weakrefs_needing_callback_list) {
visitor.visit(weakref);
visitor.visit(&weakref);
}
GC_TRACE_LOG("Looking at generated code pointers\n");
......@@ -444,11 +551,11 @@ static void finalizationOrderingFindReachable(Box* obj) {
static StatCounter sc_us("us_gc_mark_finalizer_ordering_1");
Timer _t("finalizationOrderingFindReachable", /*min_usec=*/10000);
TraceStack stack(TraceStackType::FinalizationOrderingFindReachable);
GCVisitor visitor(&stack);
TraversalWorklist worklist(TraversalType::FinalizationOrderingFindReachable);
GCVisitor visitor(&worklist);
stack.push(obj);
while (void* p = stack.pop()) {
worklist.addWork(obj);
while (void* p = worklist.next()) {
sc_marked_objs.log();
visitByGCKind(p, visitor);
......@@ -462,11 +569,11 @@ static void finalizationOrderingRemoveTemporaries(Box* obj) {
static StatCounter sc_us("us_gc_mark_finalizer_ordering_2");
Timer _t("finalizationOrderingRemoveTemporaries", /*min_usec=*/10000);
TraceStack stack(TraceStackType::FinalizationOrderingRemoveTemporaries);
GCVisitor visitor(&stack);
TraversalWorklist worklist(TraversalType::FinalizationOrderingRemoveTemporaries);
GCVisitor visitor(&worklist);
stack.push(obj);
while (void* p = stack.pop()) {
worklist.addWork(obj);
while (void* p = worklist.next()) {
GCAllocation* al = GCAllocation::fromUserData(p);
assert(orderingState(al) != FinalizationState::UNREACHABLE);
visitByGCKind(p, visitor);
......@@ -512,12 +619,12 @@ static void orderFinalizers() {
sc_us.log(us);
}
static void graphTraversalMarking(TraceStack& stack, GCVisitor& visitor) {
static void graphTraversalMarking(Worklist& worklist, GCVisitor& visitor) {
static StatCounter sc_us("us_gc_mark_phase_graph_traversal");
static StatCounter sc_marked_objs("gc_marked_object_count");
Timer _t("traversing", /*min_usec=*/10000);
while (void* p = stack.pop()) {
while (void* p = worklist.next()) {
sc_marked_objs.log();
GCAllocation* al = GCAllocation::fromUserData(p);
......@@ -529,7 +636,9 @@ static void graphTraversalMarking(TraceStack& stack, GCVisitor& visitor) {
GC_TRACE_LOG("Looking at non-python allocation %p\n", p);
#endif
assert(isMarked(al));
// Won't work once we visit objects in more ways than just marking them.
assert(isMarked(al) || MOVING_GC);
visitByGCKind(p, visitor);
}
......@@ -645,12 +754,12 @@ static void markPhase() {
GC_TRACE_LOG("Starting collection %d\n", ncollections);
GC_TRACE_LOG("Looking at roots\n");
TraceStack stack(TraceStackType::MarkPhase, roots);
GCVisitor visitor(&stack);
TraversalWorklist worklist(TraversalType::MarkPhase, roots);
GCVisitor visitor(&worklist);
markRoots(visitor);
visitRoots(visitor);
graphTraversalMarking(stack, visitor);
graphTraversalMarking(worklist, visitor);
// Objects with finalizers cannot be freed in any order. During the call to a finalizer
// of an object, the finalizer expects the object's references to still point to valid
......@@ -678,6 +787,98 @@ static void sweepPhase(std::vector<Box*>& weakly_referenced) {
sc_us.log(us);
}
static void mapReferencesPhase(ReferenceMap& refmap) {
#if MOVING_GC
ReferenceMapWorklist worklist(&refmap, roots);
GCVisitorPinning visitor(&worklist);
visitRoots(visitor);
for (auto obj : objects_with_ordered_finalizers) {
visitor.visit((void**)&obj);
}
graphTraversalMarking(worklist, visitor);
#endif
}
#if MOVING_GC
#define MOVE_LOG 1
static FILE* move_log;
static void move(ReferenceMap& refmap, GCAllocation* old_al, size_t size) {
#if MOVE_LOG
if (!move_log) {
move_log = fopen("movelog.txt", "w");
}
#endif
// Only move objects that are in the reference map (unreachable objects
// won't be in the reference map).
if (refmap.pinned.count(old_al) == 0 && refmap.references.count(old_al) > 0) {
auto& referencing = refmap.references[old_al];
assert(referencing.size() > 0);
GCAllocation* new_al = global_heap.forceRelocate(old_al);
assert(new_al);
assert(old_al->user_data != new_al->user_data);
#if MOVE_LOG
// Write the moves that have happened to file, for debugging.
fprintf(move_log, "%d) %p -> %p\n", ncollections, old_al->user_data, new_al->user_data);
#endif
for (GCAllocation* referencer : referencing) {
// If whatever is pointing to the object we just moved has also been moved,
// then we need to update the pointer in that moved object.
if (refmap.moves.count(referencer) > 0) {
referencer = refmap.moves[referencer];
}
#if MOVE_LOG
fprintf(move_log, " | referencer %p\n", referencer->user_data);
#endif
assert(referencer->kind_id == GCKind::PYTHON || referencer->kind_id == GCKind::PRECISE
|| referencer->kind_id == GCKind::RUNTIME);
GCVisitorReplacing replacer(old_al->user_data, new_al->user_data);
visitByGCKind(referencer->user_data, replacer);
}
assert(refmap.moves.count(old_al) == 0);
refmap.moves.emplace(old_al, new_al);
} else if (refmap.pinned.count(old_al) == 0) {
// TODO: This probably should not happen.
}
}
#endif
// Move objects around in memory. The purpose is to test whether the rest
// of the program is able to support a moving collector (e.g. if all pointers are
// being properly scanned by the GC).
//
// The way it works is very simple.
// 1) Perform a mark phase that, for every object, builds a list of the locations of
//    all pointers to that object (i.e. build a reference map).
//    Pin certain types of objects as necessary (e.g. conservatively scanned ones).
// 2) Reallocate all non-pinned objects and update the value at every pointer location
//    recorded in the map built in (1).
static void testMoving() {
#if MOVING_GC
global_heap.prepareForCollection();
ReferenceMap refmap;
mapReferencesPhase(refmap);
// Reallocate (aka 'move') all objects in the small heap to a different
// location. This is not useful in terms of performance, but it is useful
// to check if the rest of the program is able to support moving collectors.
global_heap.forEachSmallArenaReference([&refmap](GCAllocation* al, size_t size) { move(refmap, al, size); });
global_heap.cleanupAfterCollection();
#endif
}
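
The two phases described in the comment above (build a reference map, then relocate unpinned objects and patch every recorded referencer) can be modeled with a tiny self-contained toy; all names here are made up and the "heap" is just new/delete:

```cpp
#include <cassert>
#include <unordered_map>
#include <vector>

// Toy object with one outgoing pointer; a real GC would discover outgoing
// pointers via per-type visit functions instead of a fixed field.
struct Obj {
    Obj* ref = nullptr;
};

int main() {
    // Phase 0: a tiny "heap" of two objects, with a pointing to b.
    Obj* a = new Obj();
    Obj* b = new Obj();
    a->ref = b;

    // Phase 1: a traversal builds the reference map: for each object, who points to it.
    std::unordered_map<Obj*, std::vector<Obj*>> references;
    references[b].push_back(a);

    // Phase 2: relocate b to a fresh address and patch every recorded referencer.
    Obj* b_new = new Obj(*b);
    for (Obj* referencer : references[b]) {
        if (referencer->ref == b)
            referencer->ref = b_new;
    }
    delete b;

    assert(a->ref == b_new); // the pointer stored in a followed the move
    delete a;
    delete b_new;
    return 0;
}
```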
bool gcIsEnabled() {
return gc_enabled;
}
......@@ -761,6 +962,12 @@ void runCollection() {
global_heap.free(GCAllocation::fromUserData(o));
}
global_heap.cleanupAfterCollection();
#if MOVING_GC
testMoving();
#endif
#if TRACE_GC_MARKING
fclose(trace_fp);
trace_fp = NULL;
......@@ -768,8 +975,6 @@ void runCollection() {
should_not_reenter_gc = false; // end non-reentrant section
global_heap.cleanupAfterCollection();
if (VERBOSITY("gc") >= 2)
printf("Collection #%d done\n\n", ncollections);
......
......@@ -15,6 +15,10 @@
#ifndef PYSTON_GC_COLLECTOR_H
#define PYSTON_GC_COLLECTOR_H
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "gc/gc.h"
namespace pyston {
......@@ -55,6 +59,8 @@ bool isNonheapRoot(void* p);
void registerPythonObject(Box* b);
void invalidateOrderedFinalizerList();
void visitByGCKind(void* p, GCVisitor& visitor);
// Debugging/validation helpers: if a GC should not happen in certain sections (ex during unwinding),
// use these functions to mark that. This is different from disableGC/enableGC, since it causes an
// assert rather than delaying of the next GC.
......@@ -62,14 +68,74 @@ void startGCUnexpectedRegion();
void endGCUnexpectedRegion();
class GCVisitorNoRedundancy : public GCVisitor {
public:
void _visitRedundant(void** ptr_address) override { visit(ptr_address); }
void _visitRangeRedundant(void** start, void** end) override { visitRange(start, end); }
public:
virtual ~GCVisitorNoRedundancy() {}
virtual void visitRedundant(void* p) { visit(p); }
virtual void visitRangeRedundant(void* const* start, void* const* end) { visitRange(start, end); }
virtual void visitPotentialRedundant(void* p) { visitPotential(p); }
virtual void visitPotentialRangeRedundant(void* const* start, void* const* end) { visitPotentialRange(start, end); }
virtual bool shouldVisitRedundants() { return true; }
void visitPotentialRedundant(void* p) override { visitPotential(p); }
void visitPotentialRangeRedundant(void** start, void** end) override { visitPotentialRange(start, end); }
};
//
// Code to prototype a moving GC.
//
class ReferenceMapWorklist;
#if MOVING_GC
#define MOVING_OVERRIDE override
#else
#define MOVING_OVERRIDE
#endif
#if MOVING_GC
// Builds the reference map and also determines which objects cannot be moved.
class GCVisitorPinning : public GCVisitorNoRedundancy {
private:
ReferenceMapWorklist* worklist;
void _visit(void** ptr_address) MOVING_OVERRIDE;
public:
GCVisitorPinning(ReferenceMapWorklist* worklist) : worklist(worklist) {}
virtual ~GCVisitorPinning() {}
void visitPotential(void* p) MOVING_OVERRIDE;
};
// Visits each field and replaces its value with new_value if it was equal to old_value.
class GCVisitorReplacing : public GCVisitor {
private:
void* old_value;
void* new_value;
void _visit(void** p) MOVING_OVERRIDE;
public:
GCVisitorReplacing(void* old_value, void* new_value) : old_value(old_value), new_value(new_value) {}
virtual ~GCVisitorReplacing() {}
void visitPotential(void* p) MOVING_OVERRIDE{};
void visitPotentialRange(void** start, void** end) MOVING_OVERRIDE{};
};
class GCAllocation;
class ReferenceMap {
public:
// Pinned objects are objects that should not be moved (their pointer value should
// never change).
std::unordered_set<GCAllocation*> pinned;
// Map from objects O to all objects that contain a reference to O.
std::unordered_map<GCAllocation*, std::vector<GCAllocation*>> references;
// Track movement (reallocation) of objects.
std::unordered_map<GCAllocation*, GCAllocation*> moves;
};
#endif
}
}
......
......@@ -54,24 +54,54 @@ void popGCObject(gc::GCVisitable* obj);
namespace gc {
class TraceStack;
class GCAllocation;
class TraversalWorklist;
// The base version of the GC visitor is used for marking, in conjunction with a TraversalWorklist.
//
// Conceptually, GCVisitor should be abstract and the 'marking' behavior should be specific
// to a subclass of GCVisitor. However, that requires the use of virtual functions which
// introduce overhead. Eventually, if we really need multiple different kinds of visitors,
// we will need some dispatching mechanism, but for now, since the moving GC is still a WIP,
// the virtual dispatch is #if'd out for the regular mark-and-sweep-only use case.
class GCVisitor {
private:
TraceStack* stack;
TraversalWorklist* worklist = NULL;
protected:
// The origin object of the current visit calls.
GCAllocation* source = NULL;
#if MOVING_GC
virtual void _visit(void** ptr_address);
virtual void _visitRange(void** start, void** end);
#else
void _visit(void** ptr_address);
void _visitRange(void** start, void** end);
#endif
virtual void _visitRedundant(void** ptr_address) {}
virtual void _visitRangeRedundant(void** start, void** end) {}
public:
GCVisitor(TraceStack* stack) : stack(stack) {}
GCVisitor() {}
GCVisitor(TraversalWorklist* worklist) : worklist(worklist) {}
virtual ~GCVisitor() {}
// These all work on *user* pointers, ie pointers to the user_data section of GCAllocations
void visitIf(void* p) {
if (p)
visit(p);
}
void visit(void* p);
void visitRange(void* const* start, void* const* end);
#if MOVING_GC
virtual void visitPotential(void* p);
virtual void visitPotentialRange(void** start, void** end);
#else
void visitPotential(void* p);
void visitPotentialRange(void* const* start, void* const* end);
void visitPotentialRange(void** start, void** end);
#endif
// The purpose of the templated visit function is to avoid (void**) casts,
// which are cumbersome to write at every use of the visit function and
// error-prone (one might accidentally cast void* to void**).
template <typename T> void visit(T** ptr_address) { _visit(reinterpret_cast<void**>(ptr_address)); }
template <typename T> void visitRange(T** start, T** end) {
_visitRange(reinterpret_cast<void**>(start), reinterpret_cast<void**>(end));
}
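
As a standalone toy (not Pyston's GCVisitor), this shows what the templated overload buys: callers pass &field of any pointer type and the reinterpret_cast to void** happens in exactly one place:

```cpp
#include <cstdio>

class ToyVisitor {
    // Single place where slots are handled as untyped pointer addresses.
    void _visit(void** ptr_address) { printf("visiting slot %p holding %p\n", (void*)ptr_address, *ptr_address); }

public:
    template <typename T> void visit(T** ptr_address) { _visit(reinterpret_cast<void**>(ptr_address)); }
};

struct Node {
    Node* next = nullptr;
    int* payload = nullptr;
};

int main() {
    int value = 42;
    Node a, b;
    a.next = &b;
    a.payload = &value;

    ToyVisitor v;
    v.visit(&a.next);    // Node** -> void** handled by the template
    v.visit(&a.payload); // int**  -> void** likewise, no manual casts at the call site
    return 0;
}
```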
// Some objects have fields with pointers to Pyston heap objects that we are confident are
// already being scanned elsewhere.
......@@ -82,10 +112,14 @@ public:
// In a moving collector, every reference needs to be visited since the pointer value could
// change. We don't have a moving collector yet, but it's good practice to call visit on every
// pointer; these calls are no-ops here to avoid the performance hit in the mark-and-sweep case.
virtual void visitRedundant(void* p) {}
virtual void visitRedundantRange(void** start, void** end) {}
template <typename T> void visitRedundant(T** ptr_address) {
_visitRedundant(reinterpret_cast<void**>(ptr_address));
}
template <typename T> void visitRangeRedundant(T** start, T** end) {
_visitRangeRedundant(reinterpret_cast<void**>(start), reinterpret_cast<void**>(end));
}
virtual void visitPotentialRedundant(void* p) {}
virtual void visitPotentialRangeRedundant(void* const* start, void* const* end) {}
virtual void visitPotentialRangeRedundant(void** start, void** end) {}
// Visit pointers to objects that we know cannot be moved.
// This is often used to scan a pointer that's a copy of a pointer stored in a place that
......@@ -94,6 +128,8 @@ public:
// change that later for performance.
void visitNonRelocatable(void* p) { visitPotential(p); }
void visitNonRelocatableRange(void** start, void** end) { visitPotentialRange(start, end); }
void setSource(GCAllocation* al) { source = al; }
};
enum class GCKind : uint8_t {
......
......@@ -354,6 +354,26 @@ GCAllocation* SmallArena::realloc(GCAllocation* al, size_t bytes) {
return rtn;
}
GCAllocation* SmallArena::forceRelocate(GCAllocation* al) {
Block* b = Block::forPointer(al);
size_t size = b->size;
// Don't register moves: they don't use more memory, and registering them could trigger another GC.
GCAllocation* rtn = alloc(size);
#ifndef NVALGRIND
VALGRIND_DISABLE_ERROR_REPORTING;
memcpy(rtn, al, size);
VALGRIND_ENABLE_ERROR_REPORTING;
#else
memcpy(rtn, al, size);
#endif
free(al);
return rtn;
}
void SmallArena::free(GCAllocation* alloc) {
Block* b = Block::forPointer(alloc);
size_t size = b->size;
......@@ -414,6 +434,53 @@ void SmallArena::assertConsistent() {
}
#endif
void SmallArena::getPointersInBlockChain(std::vector<GCAllocation*>& ptrs, Block** head) {
while (Block* b = *head) {
int num_objects = b->numObjects();
int first_obj = b->minObjIndex();
int atoms_per_obj = b->atomsPerObj();
for (int atom_idx = first_obj * atoms_per_obj; atom_idx < num_objects * atoms_per_obj;
atom_idx += atoms_per_obj) {
if (b->isfree.isSet(atom_idx))
continue;
void* p = &b->atoms[atom_idx];
GCAllocation* al = reinterpret_cast<GCAllocation*>(p);
ptrs.push_back(al);
}
head = &b->next;
}
}
void SmallArena::forEachReference(std::function<void(GCAllocation*, size_t)> f) {
thread_caches.forEachValue([this, &f](ThreadBlockCache* cache) {
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
Block* h = cache->cache_free_heads[bidx];
std::vector<GCAllocation*> ptrs;
getPointersInBlockChain(ptrs, &cache->cache_free_heads[bidx]);
getPointersInBlockChain(ptrs, &cache->cache_full_heads[bidx]);
for (GCAllocation* al : ptrs) {
f(al, sizes[bidx]);
}
}
});
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
std::vector<GCAllocation*> ptrs;
getPointersInBlockChain(ptrs, &heads[bidx]);
getPointersInBlockChain(ptrs, &full_heads[bidx]);
for (GCAllocation* al : ptrs) {
f(al, sizes[bidx]);
}
}
}
void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
assertConsistent();
......@@ -667,8 +734,6 @@ void SmallArena::_getChainStatistics(HeapStatistics* stats, Block** head) {
#define LARGE_CHUNK_INDEX(obj, section) (((char*)(obj) - (char*)(section)) >> CHUNK_BITS)
GCAllocation* LargeArena::alloc(size_t size) {
registerGCManagedBytes(size);
LOCK_REGION(heap->lock);
// printf ("allocLarge %zu\n", size);
......@@ -890,8 +955,6 @@ void LargeArena::_freeLargeObj(LargeObj* obj) {
GCAllocation* HugeArena::alloc(size_t size) {
registerGCManagedBytes(size);
LOCK_REGION(heap->lock);
size_t total_size = size + sizeof(HugeObj);
......
......@@ -93,6 +93,7 @@ inline void registerGCManagedBytes(size_t bytes) {
class Heap;
class ReferenceMap;
struct HeapStatistics;
typedef uint8_t kindid_t;
......@@ -248,7 +249,6 @@ public:
}
GCAllocation* alloc(size_t bytes) {
registerGCManagedBytes(bytes);
if (bytes <= 16)
return _alloc(16, 0);
else if (bytes <= 32)
......@@ -263,7 +263,12 @@ public:
}
}
void forEachReference(std::function<void(GCAllocation*, size_t)>);
GCAllocation* realloc(GCAllocation* alloc, size_t bytes);
GCAllocation* forceRelocate(GCAllocation* alloc);
void free(GCAllocation* al);
GCAllocation* allocationFrom(void* ptr);
......@@ -405,6 +410,7 @@ private:
// TODO only use thread caches if we're in GRWL mode?
threading::PerThreadSet<ThreadBlockCache, Heap*, SmallArena*> thread_caches;
void getPointersInBlockChain(std::vector<GCAllocation*>& ptrs, Block** head);
Block* _allocBlock(uint64_t size, Block** prev);
GCAllocation* _allocFromBlock(Block* b);
Block* _claimBlock(size_t rounded_size, Block** free_head);
......@@ -585,6 +591,7 @@ public:
}
GCAllocation* alloc(size_t bytes) {
registerGCManagedBytes(bytes);
if (bytes > LargeArena::ALLOC_SIZE_LIMIT)
return huge_arena.alloc(bytes);
else if (bytes > sizes[NUM_BUCKETS - 1])
......@@ -593,6 +600,24 @@ public:
return small_arena.alloc(bytes);
}
// Slightly different from realloc in that:
// 1) The size is the same, so we call alloc in the SmallArena.
// 2) Uses a variant of alloc that doesn't register a change in the number of bytes allocated.
// 3) Always reallocates the object to a different address.
GCAllocation* forceRelocate(GCAllocation* alloc) {
GCAllocation* rtn = NULL;
if (large_arena.contains(alloc)) {
ASSERT(false, "large arena moves not supported yet");
} else if (huge_arena.contains(alloc)) {
ASSERT(false, "huge arena moves not supported yet");
} else {
assert(small_arena.contains(alloc));
rtn = small_arena.forceRelocate(alloc);
}
return rtn;
}
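
A standalone sketch of the relocate pattern used by forceRelocate, using plain malloc rather than Pyston's arenas (the function name here is illustrative):

```cpp
#include <cassert>
#include <cstdlib>
#include <cstring>

// Allocate a same-size block, copy the payload, free the original, and return
// the new address so that referencers can be patched afterwards.
void* forceRelocateSketch(void* old_block, size_t size) {
    void* new_block = malloc(size);
    assert(new_block && new_block != old_block); // old_block is still live, so addresses differ
    memcpy(new_block, old_block, size);
    free(old_block);
    return new_block;
}

int main() {
    const size_t size = 64;
    char* obj = (char*)malloc(size);
    memset(obj, 0xAB, size);
    char* moved = (char*)forceRelocateSketch(obj, size);
    assert((unsigned char)moved[0] == 0xAB); // payload survived the move
    free(moved);
    return 0;
}
```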
void destructContents(GCAllocation* alloc);
void free(GCAllocation* alloc) {
......@@ -625,6 +650,9 @@ public:
return NULL;
}
// Calls the function for every object in the small heap.
void forEachSmallArenaReference(std::function<void(GCAllocation*, size_t)> f) { small_arena.forEachReference(f); }
// not thread safe:
void freeUnmarked(std::vector<Box*>& weakly_referenced) {
small_arena.freeUnmarked(weakly_referenced);
......
......@@ -233,10 +233,10 @@ public:
Box::gcHandler(v, _b);
BoxedSysFlags* self = static_cast<BoxedSysFlags*>(_b);
v->visit(self->division_warning);
v->visit(self->bytes_warning);
v->visit(self->no_user_site);
v->visit(self->optimize);
v->visit(&self->division_warning);
v->visit(&self->bytes_warning);
v->visit(&self->no_user_site);
v->visit(&self->optimize);
}
static Box* __new__(Box* cls, Box* args, Box* kwargs) {
......
......@@ -54,9 +54,9 @@ public:
Box::gcHandler(v, o);
if (o->bases)
v->visit(o->bases);
v->visit(&o->bases);
if (o->name)
v->visit(o->name);
v->visit(&o->name);
}
};
......@@ -78,7 +78,7 @@ public:
Box::gcHandler(v, o);
if (o->inst_cls)
v->visit(o->inst_cls);
v->visit(&o->inst_cls);
}
};
}
......
......@@ -437,7 +437,7 @@ void BoxedMethodDescriptor::gcHandler(GCVisitor* v, Box* _o) {
BoxedMethodDescriptor* o = static_cast<BoxedMethodDescriptor*>(_o);
Box::gcHandler(v, o);
v->visit(o->type);
v->visit(&o->type);
}
Box* BoxedWrapperDescriptor::descr_get(Box* _self, Box* inst, Box* owner) noexcept {
......@@ -576,7 +576,7 @@ void BoxedWrapperDescriptor::gcHandler(GCVisitor* v, Box* _o) {
BoxedWrapperDescriptor* o = static_cast<BoxedWrapperDescriptor*>(_o);
Box::gcHandler(v, o);
v->visit(o->type);
v->visit(&o->type);
}
static Box* wrapperdescrGetDoc(Box* b, void*) {
......@@ -670,7 +670,7 @@ void BoxedWrapperObject::gcHandler(GCVisitor* v, Box* _o) {
BoxedWrapperObject* o = static_cast<BoxedWrapperObject*>(_o);
Box::gcHandler(v, o);
v->visit(o->obj);
v->visit(&o->obj);
}
void setupDescr() {
......
......@@ -727,8 +727,8 @@ void BoxedDict::gcHandler(GCVisitor* v, Box* b) {
BoxedDict* d = (BoxedDict*)b;
for (auto p : *d) {
v->visit(p.first);
v->visit(p.second);
v->visit(&p.first);
v->visit(&p.second);
}
}
......@@ -737,7 +737,7 @@ void BoxedDictIterator::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedDictIterator* it = static_cast<BoxedDictIterator*>(b);
v->visit(it->d);
v->visit(&it->d);
}
void BoxedDictView::gcHandler(GCVisitor* v, Box* b) {
......@@ -745,7 +745,7 @@ void BoxedDictView::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedDictView* view = static_cast<BoxedDictView*>(b);
v->visit(view->d);
v->visit(&view->d);
}
static int dict_init(PyObject* self, PyObject* args, PyObject* kwds) noexcept {
......
......@@ -1740,11 +1740,11 @@ void BoxedFile::gcHandler(GCVisitor* v, Box* b) {
assert(isSubclass(b->cls, file_cls));
BoxedFile* f = static_cast<BoxedFile*>(b);
v->visit(f->f_name);
v->visit(f->f_mode);
v->visit(f->f_encoding);
v->visit(f->f_errors);
v->visit(f->f_setbuf);
v->visit(&f->f_name);
v->visit(&f->f_mode);
v->visit(&f->f_encoding);
v->visit(&f->f_errors);
v->visit(&f->f_setbuf);
}
void setupFile() {
......
......@@ -82,8 +82,8 @@ public:
auto f = static_cast<BoxedFrame*>(b);
v->visit(f->_code);
v->visit(f->_globals);
v->visit(&f->_code);
v->visit(&f->_globals);
}
static void simpleDestructor(Box* b) {
......
......@@ -427,27 +427,27 @@ void BoxedGenerator::gcHandler(GCVisitor* v, Box* b) {
BoxedGenerator* g = (BoxedGenerator*)b;
v->visit(g->function);
v->visit(&g->function);
int num_args = g->function->f->numReceivedArgs();
if (num_args >= 1)
v->visit(g->arg1);
v->visit(&g->arg1);
if (num_args >= 2)
v->visit(g->arg2);
v->visit(&g->arg2);
if (num_args >= 3)
v->visit(g->arg3);
v->visit(&g->arg3);
if (g->args)
v->visit(g->args);
v->visit(&g->args);
if (num_args > 3)
v->visitPotentialRange(reinterpret_cast<void* const*>(&g->args->elts[0]),
reinterpret_cast<void* const*>(&g->args->elts[num_args - 3]));
v->visitPotentialRange(reinterpret_cast<void**>(&g->args->elts[0]),
reinterpret_cast<void**>(&g->args->elts[num_args - 3]));
if (g->returnValue)
v->visit(g->returnValue);
v->visit(&g->returnValue);
if (g->exception.type)
v->visit(g->exception.type);
v->visit(&g->exception.type);
if (g->exception.value)
v->visit(g->exception.value);
v->visit(&g->exception.value);
if (g->exception.traceback)
v->visit(g->exception.traceback);
v->visit(&g->exception.traceback);
if (g->running) {
v->visitPotentialRange((void**)g->returnContext,
......
......@@ -29,24 +29,25 @@ namespace pyston {
void HiddenClass::gc_visit(GCVisitor* visitor) {
// Visit children even for the dict-backed case, since children will just be empty
visitor->visitRange((void* const*)&children.vector()[0], (void* const*)&children.vector()[children.size()]);
visitor->visit(attrwrapper_child);
visitor->visitRange(const_cast<HiddenClass**>(&children.vector()[0]),
const_cast<HiddenClass**>(&children.vector()[children.size()]));
visitor->visit(&attrwrapper_child);
if (children.empty()) {
for (auto p : attr_offsets)
visitor->visit(p.first);
visitor->visit(&p.first);
} else {
#if MOVING_GC
// If we have any children, the attr_offsets map will be a subset of the child's map.
for (const auto& p : attr_offsets)
visitor->visitRedundant(p.first);
visitor->visitRedundant(const_cast<BoxedString**>(&p.first));
#endif
}
#if MOVING_GC
// The children's attr_offsets maps should already contain the keys of the 'children' map.
for (const auto& p : children) {
visitor->visitRedundant(p.first);
visitor->visitRedundant(const_cast<BoxedString**>(&p.first));
}
#endif
}
......
......@@ -134,7 +134,7 @@ public:
Box::gcHandler(v, b);
BoxedXrangeIterator* it = (BoxedXrangeIterator*)b;
v->visit(it->xrange);
v->visit(const_cast<BoxedXrange**>(&it->xrange));
}
};
......
......@@ -65,9 +65,9 @@ public:
void gc_visit(GCVisitor* v) override {
if (iterator)
v->visit(iterator);
v->visit(&iterator);
if (value)
v->visit(value);
v->visit(&value);
}
};
......@@ -118,7 +118,7 @@ public:
void gc_visit(GCVisitor* v) override {
if (obj)
v->visit(obj);
v->visit(&obj);
}
};
}
......
......@@ -116,9 +116,9 @@ void BoxedSeqIter::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedSeqIter* si = static_cast<BoxedSeqIter*>(b);
v->visit(si->b);
v->visit(&si->b);
if (si->next)
v->visit(si->next);
v->visit(&si->next);
}
void BoxedIterWrapper::gcHandler(GCVisitor* v, Box* b) {
......@@ -126,9 +126,9 @@ void BoxedIterWrapper::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedIterWrapper* iw = static_cast<BoxedIterWrapper*>(b);
v->visit(iw->iter);
v->visit(&iw->iter);
if (iw->next)
v->visit(iw->next);
v->visit(&iw->next);
}
bool iterwrapperHasnextUnboxed(Box* s) {
......
......@@ -1179,7 +1179,7 @@ extern "C" int PyList_SetSlice(PyObject* a, Py_ssize_t ilow, Py_ssize_t ihigh, P
void BoxedListIterator::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedListIterator* it = (BoxedListIterator*)b;
v->visit(it->l);
v->visit(&it->l);
}
void BoxedList::gcHandler(GCVisitor* v, Box* b) {
......@@ -1192,9 +1192,9 @@ void BoxedList::gcHandler(GCVisitor* v, Box* b) {
int capacity = l->capacity;
assert(capacity >= size);
if (capacity)
v->visit(l->elts);
v->visit(&l->elts);
if (size)
v->visitRange((void**)&l->elts->elts[0], (void**)&l->elts->elts[size]);
v->visitRange(&l->elts->elts[0], &l->elts->elts[size]);
}
void setupList() {
......
......@@ -29,7 +29,7 @@ void BoxedSet::gcHandler(GCVisitor* v, Box* b) {
BoxedSet* s = (BoxedSet*)b;
for (auto&& p : s->s) {
v->visit(p.value);
v->visit(&p.value);
}
}
......@@ -57,7 +57,7 @@ public:
BoxedSetIterator* it = (BoxedSetIterator*)b;
v->visit(it->s);
v->visit(&it->s);
}
};
......
......@@ -2430,7 +2430,7 @@ public:
static void gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedStringIterator* it = (BoxedStringIterator*)b;
v->visit(it->s);
v->visit(&it->s);
}
};
......
......@@ -42,11 +42,11 @@ public:
Box::gcHandler(v, o);
if (o->type)
v->visit(o->type);
v->visit(&o->type);
if (o->obj)
v->visit(o->obj);
v->visit(&o->obj);
if (o->obj_type)
v->visit(o->obj_type);
v->visit(&o->obj_type);
}
};
......
......@@ -40,12 +40,12 @@ void BoxedTraceback::gcHandler(GCVisitor* v, Box* b) {
BoxedTraceback* self = static_cast<BoxedTraceback*>(b);
if (self->py_lines)
v->visit(self->py_lines);
v->visit(&self->py_lines);
if (self->tb_next)
v->visit(self->tb_next);
v->visit(&self->tb_next);
v->visit(self->line.file);
v->visit(self->line.func);
v->visit(&self->line.file);
v->visit(&self->line.func);
Box::gcHandler(v, b);
}
......
......@@ -564,13 +564,13 @@ void BoxedTuple::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedTuple* t = (BoxedTuple*)b;
v->visitRange((void* const*)&t->elts[0], (void* const*)&t->elts[t->size()]);
v->visitRange(&t->elts[0], &t->elts[t->size()]);
}
extern "C" void BoxedTupleIterator::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedTupleIterator* it = (BoxedTupleIterator*)b;
v->visit(it->t);
v->visit(&it->t);
}
void setupTuple() {
......
......@@ -96,11 +96,11 @@ bool IN_SHUTDOWN = false;
std::vector<BoxedClass*> exception_types;
void FrameInfo::gcVisit(GCVisitor* visitor) {
visitor->visit(boxedLocals);
visitor->visit(exc.traceback);
visitor->visit(exc.type);
visitor->visit(exc.value);
visitor->visit(frame_obj);
visitor->visit(&boxedLocals);
visitor->visit(&exc.traceback);
visitor->visit(&exc.type);
visitor->visit(&exc.value);
visitor->visit(&frame_obj);
}
// Analogue of PyType_GenericAlloc (default tp_alloc), but should only be used for Pyston classes!
......@@ -328,14 +328,14 @@ Box* Box::hasnextOrNullIC() {
void Box::gcHandler(GCVisitor* v, Box* b) {
if (b->cls) {
v->visit(b->cls);
v->visit(&b->cls);
if (b->cls->instancesHaveHCAttrs()) {
HCAttrs* attrs = b->getHCAttrsPtr();
v->visit(attrs->hcls);
v->visit(&attrs->hcls);
if (attrs->attr_list)
v->visit(attrs->attr_list);
v->visit(&attrs->attr_list);
}
if (b->cls->instancesHaveDictAttrs()) {
......@@ -346,7 +346,7 @@ void Box::gcHandler(GCVisitor* v, Box* b) {
BoxedHeapClass* heap_cls = static_cast<BoxedHeapClass*>(b->cls);
BoxedHeapClass::SlotOffset* slotOffsets = heap_cls->slotOffsets();
for (int i = 0; i < heap_cls->nslots(); i++) {
v->visit(*((Box**)((char*)b + slotOffsets[i])));
v->visit(&*((Box**)((char*)b + slotOffsets[i])));
}
}
} else {
......@@ -437,28 +437,28 @@ void BoxedFunction::gcHandler(GCVisitor* v, Box* b) {
// TODO eventually f->name should always be non-NULL, then there'd be no need for this check
if (f->name)
v->visit(f->name);
v->visit(&f->name);
if (f->modname)
v->visit(f->modname);
v->visit(&f->modname);
if (f->doc)
v->visit(f->doc);
v->visit(&f->doc);
if (f->closure)
v->visit(f->closure);
v->visit(&f->closure);
if (f->globals)
v->visit(f->globals);
v->visit(&f->globals);
// It's ok for f->defaults to be NULL here even if f->ndefaults isn't,
// since we could be collecting from inside a BoxedFunctionBase constructor
if (f->ndefaults) {
assert(f->defaults);
v->visit(f->defaults);
v->visit(&f->defaults);
// do a conservative scan since there can be NULLs in there:
v->visitPotentialRange(reinterpret_cast<void* const*>(&f->defaults->elts[0]),
reinterpret_cast<void* const*>(&f->defaults->elts[f->ndefaults]));
v->visitPotentialRange(reinterpret_cast<void**>(&f->defaults->elts[0]),
reinterpret_cast<void**>(&f->defaults->elts[f->ndefaults]));
}
}
......@@ -559,7 +559,7 @@ Box* BoxedModule::getLongConstant(llvm::StringRef ast_str) {
}
template <typename A, typename B, typename C> void visitContiguousMap(GCVisitor* v, ContiguousMap<A, B, C>& map) {
v->visitRange((void* const*)&map.vector()[0], (void* const*)&map.vector()[map.size()]);
v->visitRange(const_cast<B*>(&map.vector()[0]), const_cast<B*>(&map.vector()[map.size()]));
}
void BoxedModule::gcHandler(GCVisitor* v, Box* b) {
......@@ -574,7 +574,7 @@ void BoxedModule::gcHandler(GCVisitor* v, Box* b) {
visitContiguousMap(v, d->imaginary_constants);
visitContiguousMap(v, d->long_constants);
if (!d->keep_alive.empty())
v->visitRange((void**)&d->keep_alive[0], (void**)((&d->keep_alive[0]) + d->keep_alive.size()));
v->visitRange(&d->keep_alive[0], ((&d->keep_alive[0]) + d->keep_alive.size()));
}
// This mustn't throw; our IR generator generates calls to it without "invoke" even when there are exception handlers /
......@@ -1365,20 +1365,20 @@ void BoxedHeapClass::gcHandler(GCVisitor* v, Box* b) {
BoxedClass* cls = (BoxedClass*)b;
if (cls->tp_base)
v->visit(cls->tp_base);
v->visit(&cls->tp_base);
if (cls->tp_dict)
v->visit(cls->tp_dict);
v->visit(&cls->tp_dict);
if (cls->tp_mro)
v->visit(cls->tp_mro);
v->visit(&cls->tp_mro);
if (cls->tp_bases)
v->visit(cls->tp_bases);
v->visit(&cls->tp_bases);
if (cls->tp_subclasses)
v->visit(cls->tp_subclasses);
v->visit(&cls->tp_subclasses);
if (cls->tp_flags & Py_TPFLAGS_HEAPTYPE) {
BoxedHeapClass* hcls = static_cast<BoxedHeapClass*>(cls);
assert(hcls->ht_name);
v->visit(hcls->ht_name);
v->visit(&hcls->ht_name);
}
}
......@@ -1428,9 +1428,9 @@ void BoxedInstanceMethod::gcHandler(GCVisitor* v, Box* b) {
BoxedInstanceMethod* im = (BoxedInstanceMethod*)b;
v->visit(im->obj);
v->visit(im->func);
v->visit(im->im_class);
v->visit(&im->obj);
v->visit(&im->func);
v->visit(&im->im_class);
}
void BoxedProperty::gcHandler(GCVisitor* v, Box* b) {
......@@ -1439,13 +1439,13 @@ void BoxedProperty::gcHandler(GCVisitor* v, Box* b) {
BoxedProperty* prop = (BoxedProperty*)b;
if (prop->prop_get)
v->visit(prop->prop_get);
v->visit(&prop->prop_get);
if (prop->prop_set)
v->visit(prop->prop_set);
v->visit(&prop->prop_set);
if (prop->prop_del)
v->visit(prop->prop_del);
v->visit(&prop->prop_del);
if (prop->prop_doc)
v->visit(prop->prop_doc);
v->visit(&prop->prop_doc);
}
void BoxedStaticmethod::gcHandler(GCVisitor* v, Box* b) {
......@@ -1454,7 +1454,7 @@ void BoxedStaticmethod::gcHandler(GCVisitor* v, Box* b) {
BoxedStaticmethod* sm = (BoxedStaticmethod*)b;
if (sm->sm_callable)
v->visit(sm->sm_callable);
v->visit(&sm->sm_callable);
}
void BoxedClassmethod::gcHandler(GCVisitor* v, Box* b) {
......@@ -1463,7 +1463,7 @@ void BoxedClassmethod::gcHandler(GCVisitor* v, Box* b) {
BoxedClassmethod* cm = (BoxedClassmethod*)b;
if (cm->cm_callable)
v->visit(cm->cm_callable);
v->visit(&cm->cm_callable);
}
void BoxedSlice::gcHandler(GCVisitor* v, Box* b) {
......@@ -1473,15 +1473,15 @@ void BoxedSlice::gcHandler(GCVisitor* v, Box* b) {
BoxedSlice* sl = static_cast<BoxedSlice*>(b);
v->visit(sl->start);
v->visit(sl->stop);
v->visit(sl->step);
v->visit(&sl->start);
v->visit(&sl->stop);
v->visit(&sl->step);
}
static int call_gc_visit(PyObject* val, void* arg) {
if (val) {
GCVisitor* v = static_cast<GCVisitor*>(arg);
v->visit(val);
v->visit(&val);
}
return 0;
}
......@@ -1500,11 +1500,11 @@ void BoxedClosure::gcHandler(GCVisitor* v, Box* b) {
BoxedClosure* c = (BoxedClosure*)b;
if (c->parent)
v->visit(c->parent);
v->visit(&c->parent);
for (int i = 0; i < c->nelts; i++) {
if (c->elts[i])
v->visit(c->elts[i]);
v->visit(&c->elts[i]);
}
}
......@@ -2193,7 +2193,7 @@ public:
Box::gcHandler(v, b);
AttrWrapperIter* self = (AttrWrapperIter*)b;
v->visit(self->hcls);
v->visit(&self->hcls);
}
static Box* hasnext(Box* _self);
......@@ -2228,7 +2228,7 @@ public:
Box::gcHandler(v, b);
AttrWrapper* aw = (AttrWrapper*)b;
v->visit(aw->b);
v->visit(&aw->b);
}
static Box* setitem(Box* _self, Box* _key, Box* value) {
......@@ -3258,8 +3258,8 @@ void unicode_visit(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
PyUnicodeObject* u = (PyUnicodeObject*)b;
v->visit(u->str);
v->visit(u->defenc);
v->visit(&u->str);
v->visit(&u->defenc);
}
extern "C" PyUnicodeObject* unicode_empty;
......
......@@ -6,7 +6,7 @@ class C(object):
def f(self, i):
return i * i
def __getitem__(self, i):
if i < 1000:
if i < 200:
return self.f(i)
raise IndexError(i)
......@@ -25,7 +25,7 @@ class C2(object):
self.n += 1
return self.n * 2
def next(self):
if self.n < 1000:
if self.n < 200:
return self.f()
raise StopIteration()
......
......@@ -17,7 +17,7 @@ def call_function_far_up_the_stack(fn, num_calls_left=200):
# It's useful to call the GC at different locations in the stack in case it's the
# call to the GC itself that left a lingering pointer (e.g. the pointer could be the
# __del__ attribute of an object we'd like to collect).
def call_gc_throughout_the_stack(number_of_gc_calls, num_calls_left=100):
def call_gc_throughout_the_stack(number_of_gc_calls, num_calls_left=30):
if num_calls_left > 0:
call_gc_throughout_the_stack(number_of_gc_calls, num_calls_left - 1)
if number_of_gc_calls >= num_calls_left:
......
......@@ -31,7 +31,7 @@ def p(x):
return [hex(ord(i)) for i in x]
s = u"\u20AC" # euro sign
print p(u"\N{EURO SIGN}")
print p(s)
print p(s)
print p(s.encode("utf8"))
print p(s.encode("utf16"))
print p(s.encode("utf32"))
......@@ -51,7 +51,7 @@ for i in xrange(100):
print repr(BaseException().__unicode__())
gc.collect()
# do some allocations:
for j in xrange(100):
for j in xrange(101):
[None] * j
print u'' in ''
......