Commit 7b84d99d authored by Kevin Modzelewski

Merge pull request #889 from rudi-c/movingmerge

Optionally move objects around in memory to prepare Pyston for a moving collector.
parents db991b38 8a510e30
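
The pattern that recurs throughout this merge is that GC handlers now hand the visitor the *address* of each pointer field (`v->visit(&o->field)`) rather than the pointer value (`v->visit(o->field)`), so that a moving collector can rewrite the field if the referenced object is relocated. Below is a minimal sketch of why that extra level of indirection matters; the `MovingVisitorSketch` and `ExampleObject` types are illustrative stand-ins, not Pyston's actual classes:

```cpp
// A minimal, hypothetical sketch (not Pyston's real classes): visiting the
// *address* of a pointer field lets the collector rewrite the field when the
// referenced object moves; visiting only the value never could.
struct Box {};

class MovingVisitorSketch {
public:
    // 'slot' points at the field that holds the reference, e.g. &o->module.
    void visit(Box** slot) {
        if (*slot == nullptr)
            return;
        Box* new_location = maybeRelocate(*slot); // hypothetical move
        *slot = new_location;                     // patch the field in place
    }

private:
    // Stand-in for real relocation logic; here the object just stays put.
    Box* maybeRelocate(Box* obj) { return obj; }
};

struct ExampleObject {
    Box* field = nullptr;

    void gcHandler(MovingVisitorSketch* v) {
        v->visit(&field);   // new style: pass the slot's address
        // v->visit(field); // old style: the visitor could never update 'field'
    }
};
```
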
......@@ -28,7 +28,7 @@ typedef int (*update_callback)(PyTypeObject*, void*);
PyObject* tp_new_wrapper(PyTypeObject* self, BoxedTuple* args, Box* kwds) noexcept;
extern "C" void conservativeGCHandler(GCVisitor* v, Box* b) noexcept {
v->visitPotentialRange((void* const*)b, (void* const*)((char*)b + b->cls->tp_basicsize));
v->visitPotentialRange((void**)b, (void**)((char*)b + b->cls->tp_basicsize));
}
extern "C" void conservativeAndBasesGCHandler(GCVisitor* v, Box* b) noexcept {
......
......@@ -68,8 +68,8 @@ public:
BoxedCApiFunction* o = static_cast<BoxedCApiFunction*>(_o);
Box::gcHandler(v, o);
v->visit(o->passthrough);
v->visit(o->module);
v->visit(&o->passthrough);
v->visit(&o->module);
}
};
static_assert(sizeof(BoxedCApiFunction) == sizeof(PyCFunctionObject), "");
......
......@@ -580,9 +580,12 @@ public:
PythonUnwindSession* o = static_cast<PythonUnwindSession*>(_o);
v->visitIf(o->exc_info.type);
v->visitIf(o->exc_info.value);
v->visitIf(o->exc_info.traceback);
if (o->exc_info.type)
v->visit(&o->exc_info.type);
if (o->exc_info.value)
v->visit(&o->exc_info.value);
if (o->exc_info.traceback)
v->visit(&o->exc_info.traceback);
}
};
static __thread PythonUnwindSession* cur_unwind;
......
......@@ -125,16 +125,16 @@ public:
void accept(gc::GCVisitor* v) {
auto pub_state = public_thread_state;
if (pub_state->curexc_type)
v->visit(pub_state->curexc_type);
v->visit(&pub_state->curexc_type);
if (pub_state->curexc_value)
v->visit(pub_state->curexc_value);
v->visit(&pub_state->curexc_value);
if (pub_state->curexc_traceback)
v->visit(pub_state->curexc_traceback);
v->visit(&pub_state->curexc_traceback);
if (pub_state->dict)
v->visit(pub_state->dict);
v->visit(&pub_state->dict);
for (auto& stack_info : previous_stacks) {
v->visit(stack_info.next_generator);
v->visit(&stack_info.next_generator);
#if STACK_GROWS_DOWN
v->visitPotentialRange((void**)stack_info.stack_limit, (void**)stack_info.stack_start);
#else
......
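
The stack-scanning paths keep using `visitPotentialRange`, since words on a saved stack are only candidate pointers: they can be marked conservatively but never rewritten, which is why a moving collector has to pin whatever they appear to reference. A hedged sketch of that distinction, with an invented heap check:

```cpp
#include <cstdint>
#include <unordered_set>

// Illustrative only (not Pyston's implementation): a conservative scan treats
// every word in [start, end) as a possible heap pointer. Because a word might
// be an integer that merely looks like a pointer, the slot can never be
// rewritten, so whatever it appears to reference has to stay pinned.
class ConservativeScanSketch {
public:
    std::unordered_set<void*> pinned; // objects that must not be moved

    void visitPotentialRange(void** start, void** end) {
        for (void** cur = start; cur != end; ++cur) {
            void* candidate = *cur;
            if (looksLikeHeapPointer(candidate))
                pinned.insert(candidate); // mark + pin, but never write *cur
        }
    }

private:
    // Stand-in heuristic; the real check asks the heap which arena (if any)
    // contains the address.
    bool looksLikeHeapPointer(void* p) {
        return p != nullptr && reinterpret_cast<uintptr_t>(p) % sizeof(void*) == 0;
    }
};
```
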
......@@ -15,6 +15,10 @@
#ifndef PYSTON_GC_COLLECTOR_H
#define PYSTON_GC_COLLECTOR_H
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "gc/gc.h"
namespace pyston {
......@@ -55,6 +59,8 @@ bool isNonheapRoot(void* p);
void registerPythonObject(Box* b);
void invalidateOrderedFinalizerList();
void visitByGCKind(void* p, GCVisitor& visitor);
// Debugging/validation helpers: if a GC should not happen in certain sections (e.g. during unwinding),
// use these functions to mark that. This is different from disableGC/enableGC, since it causes an
// assert rather than delaying the next GC.
......@@ -62,14 +68,74 @@ void startGCUnexpectedRegion();
void endGCUnexpectedRegion();
class GCVisitorNoRedundancy : public GCVisitor {
public:
void _visitRedundant(void** ptr_address) override { visit(ptr_address); }
void _visitRangeRedundant(void** start, void** end) override { visitRange(start, end); }
public:
virtual ~GCVisitorNoRedundancy() {}
virtual void visitRedundant(void* p) { visit(p); }
virtual void visitRangeRedundant(void* const* start, void* const* end) { visitRange(start, end); }
virtual void visitPotentialRedundant(void* p) { visitPotential(p); }
virtual void visitPotentialRangeRedundant(void* const* start, void* const* end) { visitPotentialRange(start, end); }
virtual bool shouldVisitRedundants() { return true; }
void visitPotentialRedundant(void* p) override { visitPotential(p); }
void visitPotentialRangeRedundant(void** start, void** end) override { visitPotentialRange(start, end); }
};
//
// Code to prototype a moving GC.
//
class ReferenceMapWorklist;
#if MOVING_GC
#define MOVING_OVERRIDE override
#else
#define MOVING_OVERRIDE
#endif
#if MOVING_GC
// Builds the reference map, and also determines which objects cannot be moved.
class GCVisitorPinning : public GCVisitorNoRedundancy {
private:
ReferenceMapWorklist* worklist;
void _visit(void** ptr_address) MOVING_OVERRIDE;
public:
GCVisitorPinning(ReferenceMapWorklist* worklist) : worklist(worklist) {}
virtual ~GCVisitorPinning() {}
void visitPotential(void* p) MOVING_OVERRIDE;
};
// Visits the fields and replaces any pointer equal to old_value with new_value.
class GCVisitorReplacing : public GCVisitor {
private:
void* old_value;
void* new_value;
void _visit(void** p) MOVING_OVERRIDE;
public:
GCVisitorReplacing(void* old_value, void* new_value) : old_value(old_value), new_value(new_value) {}
virtual ~GCVisitorReplacing() {}
void visitPotential(void* p) MOVING_OVERRIDE{};
void visitPotentialRange(void** start, void** end) MOVING_OVERRIDE{};
};
class GCAllocation;
class ReferenceMap {
public:
// Pinned objects are objects that should not be moved (their pointer value should
// never change).
std::unordered_set<GCAllocation*> pinned;
// Map from objects O to all objects that contain a reference to O.
std::unordered_map<GCAllocation*, std::vector<GCAllocation*>> references;
// Track movement (reallocation) of objects.
std::unordered_map<GCAllocation*, GCAllocation*> moves;
};
#endif
}
}
......
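
Taken together, the prototype pieces are meant to work roughly like this: `GCVisitorPinning` builds the `ReferenceMap` (recording, for each allocation, the allocations that point to it, and pinning anything reached only conservatively); when an object is later relocated, the collector can look up its referrers and run a `GCVisitorReplacing` over each one to swap the old address for the new one. Below is a hedged sketch of that fix-up step; the driver function and the stub helpers are assumptions, not code from this commit:

```cpp
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Hypothetical driver, not part of this commit: given a reference map built by
// GCVisitorPinning, relocate one unpinned allocation and patch its referrers.
struct GCAllocation {}; // stand-in for Pyston's allocation header

struct ReferenceMapSketch {
    std::unordered_set<GCAllocation*> pinned;                                 // must not move
    std::unordered_map<GCAllocation*, std::vector<GCAllocation*>> references; // who points at whom
    std::unordered_map<GCAllocation*, GCAllocation*> moves;                   // old -> new address
};

// Stand-ins for the heap's relocation and for re-visiting a single referrer
// (the real code would use Heap::forceRelocate and a GCVisitorReplacing).
GCAllocation* forceRelocateSketch(GCAllocation* al) { return al; }
void replaceReferencesIn(GCAllocation* referrer, GCAllocation* old_al, GCAllocation* new_al) {
    (void)referrer;
    (void)old_al;
    (void)new_al;
}

void moveOneObject(ReferenceMapSketch& refmap, GCAllocation* al) {
    if (refmap.pinned.count(al))
        return;                                     // conservatively referenced: leave it alone
    GCAllocation* new_al = forceRelocateSketch(al); // copy to a fresh address
    refmap.moves[al] = new_al;                      // remember the move
    for (GCAllocation* referrer : refmap.references[al])
        replaceReferencesIn(referrer, al, new_al);  // patch every recorded referrer
}
```
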
......@@ -54,24 +54,54 @@ void popGCObject(gc::GCVisitable* obj);
namespace gc {
class TraceStack;
class GCAllocation;
class TraversalWorklist;
// The base version of the GC visitor is used for marking, in conjunction with a TraversalWorklist.
//
// Conceptually, GCVisitor should be abstract and the 'marking' behavior should be specific
// to a subclass of GCVisitor. However, that requires virtual functions, which introduce overhead.
// Eventually, if we really need multiple kinds of visitors, we will need some dispatching
// mechanism, but for now, since the moving GC is still a work in progress, the virtualness
// is #if'd out for the regular use case with only mark-and-sweep.
class GCVisitor {
private:
TraceStack* stack;
TraversalWorklist* worklist = NULL;
protected:
// The origin object of the current visit calls.
GCAllocation* source = NULL;
#if MOVING_GC
virtual void _visit(void** ptr_address);
virtual void _visitRange(void** start, void** end);
#else
void _visit(void** ptr_address);
void _visitRange(void** start, void** end);
#endif
virtual void _visitRedundant(void** ptr_address) {}
virtual void _visitRangeRedundant(void** start, void** end) {}
public:
GCVisitor(TraceStack* stack) : stack(stack) {}
GCVisitor() {}
GCVisitor(TraversalWorklist* worklist) : worklist(worklist) {}
virtual ~GCVisitor() {}
// These all work on *user* pointers, i.e. pointers to the user_data section of GCAllocations
void visitIf(void* p) {
if (p)
visit(p);
}
void visit(void* p);
void visitRange(void* const* start, void* const* end);
#if MOVING_GC
virtual void visitPotential(void* p);
virtual void visitPotentialRange(void** start, void** end);
#else
void visitPotential(void* p);
void visitPotentialRange(void* const* start, void* const* end);
void visitPotentialRange(void** start, void** end);
#endif
// The purpose of the templated visit functions is to avoid (void**) casts,
// which are cumbersome to write at every call site and
// error-prone (one might accidentally cast void* to void**).
template <typename T> void visit(T** ptr_address) { _visit(reinterpret_cast<void**>(ptr_address)); }
template <typename T> void visitRange(T** start, T** end) {
_visitRange(reinterpret_cast<void**>(start), reinterpret_cast<void**>(end));
}
// Some objects have fields with pointers to Pyston heap objects that we are confident are
// already being scanned elsewhere.
......@@ -82,10 +112,14 @@ public:
// In a moving collector, every reference needs to be visited since the pointer value could
// change. We don't have a moving collector yet, but it's good practice to visit every pointer
// value anyway; these calls are no-ops so the mark-and-sweep case doesn't pay a performance hit.
virtual void visitRedundant(void* p) {}
virtual void visitRedundantRange(void** start, void** end) {}
template <typename T> void visitRedundant(T** ptr_address) {
_visitRedundant(reinterpret_cast<void**>(ptr_address));
}
template <typename T> void visitRangeRedundant(T** start, T** end) {
_visitRangeRedundant(reinterpret_cast<void**>(start), reinterpret_cast<void**>(end));
}
virtual void visitPotentialRedundant(void* p) {}
virtual void visitPotentialRangeRedundant(void* const* start, void* const* end) {}
virtual void visitPotentialRangeRedundant(void** start, void** end) {}
// Visit pointers to objects that we know cannot be moved.
// This is often used to scan a pointer that's a copy of a pointer stored in a place that
......@@ -94,6 +128,8 @@ public:
// change that later for performance.
void visitNonRelocatable(void* p) { visitPotential(p); }
void visitNonRelocatableRange(void** start, void** end) { visitPotentialRange(start, end); }
void setSource(GCAllocation* al) { source = al; }
};
enum class GCKind : uint8_t {
......
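
As the comment in this hunk notes, `visit` and `visitRange` are now templates over `T**`, so handlers can pass field addresses directly and the `(void**)` cast happens in one place. A short illustrative handler written against that shape of interface; the `VisitorSketch` and `BoxedPairSketch` types are made up for the example:

```cpp
// Minimal stand-ins to show the call pattern; these are not Pyston's real
// declarations, just enough to make the example self-contained.
struct Box;

struct VisitorSketch {
    template <typename T> void visit(T** slot) { _visit(reinterpret_cast<void**>(slot)); }
    template <typename T> void visitRange(T** start, T** end) {
        _visitRange(reinterpret_cast<void**>(start), reinterpret_cast<void**>(end));
    }
    void _visit(void** /*slot*/) {}
    void _visitRange(void** /*start*/, void** /*end*/) {}
};

// A handler in the new style: pass field addresses, no manual (void**) casts.
struct BoxedPairSketch {
    Box* first = nullptr;
    Box* second = nullptr;
    Box* extras[4] = {};

    static void gcHandler(VisitorSketch* v, BoxedPairSketch* o) {
        v->visit(&o->first);                         // T deduced as Box
        v->visit(&o->second);
        v->visitRange(&o->extras[0], &o->extras[4]); // one past the end, like std iterators
    }
};
```
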
......@@ -354,6 +354,26 @@ GCAllocation* SmallArena::realloc(GCAllocation* al, size_t bytes) {
return rtn;
}
GCAllocation* SmallArena::forceRelocate(GCAllocation* al) {
Block* b = Block::forPointer(al);
size_t size = b->size;
// Don't register moves (with registerGCManagedBytes): they don't use more memory, and registering could trigger another GC.
GCAllocation* rtn = alloc(size);
#ifndef NVALGRIND
VALGRIND_DISABLE_ERROR_REPORTING;
memcpy(rtn, al, size);
VALGRIND_ENABLE_ERROR_REPORTING;
#else
memcpy(rtn, al, size);
#endif
free(al);
return rtn;
}
void SmallArena::free(GCAllocation* alloc) {
Block* b = Block::forPointer(alloc);
size_t size = b->size;
......@@ -414,6 +434,53 @@ void SmallArena::assertConsistent() {
}
#endif
void SmallArena::getPointersInBlockChain(std::vector<GCAllocation*>& ptrs, Block** head) {
while (Block* b = *head) {
int num_objects = b->numObjects();
int first_obj = b->minObjIndex();
int atoms_per_obj = b->atomsPerObj();
for (int atom_idx = first_obj * atoms_per_obj; atom_idx < num_objects * atoms_per_obj;
atom_idx += atoms_per_obj) {
if (b->isfree.isSet(atom_idx))
continue;
void* p = &b->atoms[atom_idx];
GCAllocation* al = reinterpret_cast<GCAllocation*>(p);
ptrs.push_back(al);
}
head = &b->next;
}
}
void SmallArena::forEachReference(std::function<void(GCAllocation*, size_t)> f) {
thread_caches.forEachValue([this, &f](ThreadBlockCache* cache) {
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
Block* h = cache->cache_free_heads[bidx];
std::vector<GCAllocation*> ptrs;
getPointersInBlockChain(ptrs, &cache->cache_free_heads[bidx]);
getPointersInBlockChain(ptrs, &cache->cache_full_heads[bidx]);
for (GCAllocation* al : ptrs) {
f(al, sizes[bidx]);
}
}
});
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
std::vector<GCAllocation*> ptrs;
getPointersInBlockChain(ptrs, &heads[bidx]);
getPointersInBlockChain(ptrs, &full_heads[bidx]);
for (GCAllocation* al : ptrs) {
f(al, sizes[bidx]);
}
}
}
void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
assertConsistent();
......@@ -667,8 +734,6 @@ void SmallArena::_getChainStatistics(HeapStatistics* stats, Block** head) {
#define LARGE_CHUNK_INDEX(obj, section) (((char*)(obj) - (char*)(section)) >> CHUNK_BITS)
GCAllocation* LargeArena::alloc(size_t size) {
registerGCManagedBytes(size);
LOCK_REGION(heap->lock);
// printf ("allocLarge %zu\n", size);
......@@ -890,8 +955,6 @@ void LargeArena::_freeLargeObj(LargeObj* obj) {
GCAllocation* HugeArena::alloc(size_t size) {
registerGCManagedBytes(size);
LOCK_REGION(heap->lock);
size_t total_size = size + sizeof(HugeObj);
......
......@@ -93,6 +93,7 @@ inline void registerGCManagedBytes(size_t bytes) {
class Heap;
class ReferenceMap;
struct HeapStatistics;
typedef uint8_t kindid_t;
......@@ -248,7 +249,6 @@ public:
}
GCAllocation* alloc(size_t bytes) {
registerGCManagedBytes(bytes);
if (bytes <= 16)
return _alloc(16, 0);
else if (bytes <= 32)
......@@ -263,7 +263,12 @@ public:
}
}
void forEachReference(std::function<void(GCAllocation*, size_t)>);
GCAllocation* realloc(GCAllocation* alloc, size_t bytes);
GCAllocation* forceRelocate(GCAllocation* alloc);
void free(GCAllocation* al);
GCAllocation* allocationFrom(void* ptr);
......@@ -405,6 +410,7 @@ private:
// TODO only use thread caches if we're in GRWL mode?
threading::PerThreadSet<ThreadBlockCache, Heap*, SmallArena*> thread_caches;
void getPointersInBlockChain(std::vector<GCAllocation*>& ptrs, Block** head);
Block* _allocBlock(uint64_t size, Block** prev);
GCAllocation* _allocFromBlock(Block* b);
Block* _claimBlock(size_t rounded_size, Block** free_head);
......@@ -585,6 +591,7 @@ public:
}
GCAllocation* alloc(size_t bytes) {
registerGCManagedBytes(bytes);
if (bytes > LargeArena::ALLOC_SIZE_LIMIT)
return huge_arena.alloc(bytes);
else if (bytes > sizes[NUM_BUCKETS - 1])
......@@ -593,6 +600,24 @@ public:
return small_arena.alloc(bytes);
}
// Slightly different from realloc in that:
// 1) The size is the same, so we call alloc in the SmallArena.
// 2) It uses a variant of alloc that doesn't register a change in the number of bytes allocated.
// 3) It always reallocates the object to a different address.
GCAllocation* forceRelocate(GCAllocation* alloc) {
GCAllocation* rtn = NULL;
if (large_arena.contains(alloc)) {
ASSERT(false, "large arena moves not supported yet");
} else if (huge_arena.contains(alloc)) {
ASSERT(false, "huge arena moves not supported yet");
} else {
assert(small_arena.contains(alloc));
rtn = small_arena.forceRelocate(alloc);
}
return rtn;
}
void destructContents(GCAllocation* alloc);
void free(GCAllocation* alloc) {
......@@ -625,6 +650,9 @@ public:
return NULL;
}
// Calls the function for every object in the small heap.
void forEachSmallArenaReference(std::function<void(GCAllocation*, size_t)> f) { small_arena.forEachReference(f); }
// not thread safe:
void freeUnmarked(std::vector<Box*>& weakly_referenced) {
small_arena.freeUnmarked(weakly_referenced);
......
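
The new `Heap` entry points give the moving-GC prototype its outer loop: `forEachSmallArenaReference` enumerates every small-arena allocation together with its size class, and `forceRelocate` copies a single allocation to a fresh address (small arena only for now, as the asserts above show). A hedged usage sketch under those assumptions; the heap type, driver function, and selection policy below are invented for illustration, and the referrer fix-up step (see the ReferenceMap sketch earlier) is omitted:

```cpp
#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

struct GCAllocation;

struct HeapSketch {
    // Mirrors the signatures of Heap::forEachSmallArenaReference and
    // Heap::forceRelocate added in this diff; the bodies here are stubs.
    void forEachSmallArenaReference(std::function<void(GCAllocation*, size_t)> f) { (void)f; }
    GCAllocation* forceRelocate(GCAllocation* al) { return al; }
};

void relocateSmallObjects(HeapSketch& heap, const std::vector<GCAllocation*>& movable) {
    // First pass: enumerate every allocation in the small arena.
    std::vector<GCAllocation*> candidates;
    heap.forEachSmallArenaReference([&](GCAllocation* al, size_t bucket_size) {
        (void)bucket_size; // size class of the allocation, useful for stats
        candidates.push_back(al);
    });

    // Second pass: move only the allocations the reference map deemed movable.
    for (GCAllocation* al : candidates) {
        if (std::find(movable.begin(), movable.end(), al) != movable.end())
            heap.forceRelocate(al); // old address is freed; referrers must be patched next
    }
}
```
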
......@@ -233,10 +233,10 @@ public:
Box::gcHandler(v, _b);
BoxedSysFlags* self = static_cast<BoxedSysFlags*>(_b);
v->visit(self->division_warning);
v->visit(self->bytes_warning);
v->visit(self->no_user_site);
v->visit(self->optimize);
v->visit(&self->division_warning);
v->visit(&self->bytes_warning);
v->visit(&self->no_user_site);
v->visit(&self->optimize);
}
static Box* __new__(Box* cls, Box* args, Box* kwargs) {
......
......@@ -54,9 +54,9 @@ public:
Box::gcHandler(v, o);
if (o->bases)
v->visit(o->bases);
v->visit(&o->bases);
if (o->name)
v->visit(o->name);
v->visit(&o->name);
}
};
......@@ -78,7 +78,7 @@ public:
Box::gcHandler(v, o);
if (o->inst_cls)
v->visit(o->inst_cls);
v->visit(&o->inst_cls);
}
};
}
......
......@@ -437,7 +437,7 @@ void BoxedMethodDescriptor::gcHandler(GCVisitor* v, Box* _o) {
BoxedMethodDescriptor* o = static_cast<BoxedMethodDescriptor*>(_o);
Box::gcHandler(v, o);
v->visit(o->type);
v->visit(&o->type);
}
Box* BoxedWrapperDescriptor::descr_get(Box* _self, Box* inst, Box* owner) noexcept {
......@@ -576,7 +576,7 @@ void BoxedWrapperDescriptor::gcHandler(GCVisitor* v, Box* _o) {
BoxedWrapperDescriptor* o = static_cast<BoxedWrapperDescriptor*>(_o);
Box::gcHandler(v, o);
v->visit(o->type);
v->visit(&o->type);
}
static Box* wrapperdescrGetDoc(Box* b, void*) {
......@@ -670,7 +670,7 @@ void BoxedWrapperObject::gcHandler(GCVisitor* v, Box* _o) {
BoxedWrapperObject* o = static_cast<BoxedWrapperObject*>(_o);
Box::gcHandler(v, o);
v->visit(o->obj);
v->visit(&o->obj);
}
void setupDescr() {
......
......@@ -727,8 +727,8 @@ void BoxedDict::gcHandler(GCVisitor* v, Box* b) {
BoxedDict* d = (BoxedDict*)b;
for (auto p : *d) {
v->visit(p.first);
v->visit(p.second);
v->visit(&p.first);
v->visit(&p.second);
}
}
......@@ -737,7 +737,7 @@ void BoxedDictIterator::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedDictIterator* it = static_cast<BoxedDictIterator*>(b);
v->visit(it->d);
v->visit(&it->d);
}
void BoxedDictView::gcHandler(GCVisitor* v, Box* b) {
......@@ -745,7 +745,7 @@ void BoxedDictView::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedDictView* view = static_cast<BoxedDictView*>(b);
v->visit(view->d);
v->visit(&view->d);
}
static int dict_init(PyObject* self, PyObject* args, PyObject* kwds) noexcept {
......
......@@ -1740,11 +1740,11 @@ void BoxedFile::gcHandler(GCVisitor* v, Box* b) {
assert(isSubclass(b->cls, file_cls));
BoxedFile* f = static_cast<BoxedFile*>(b);
v->visit(f->f_name);
v->visit(f->f_mode);
v->visit(f->f_encoding);
v->visit(f->f_errors);
v->visit(f->f_setbuf);
v->visit(&f->f_name);
v->visit(&f->f_mode);
v->visit(&f->f_encoding);
v->visit(&f->f_errors);
v->visit(&f->f_setbuf);
}
void setupFile() {
......
......@@ -82,8 +82,8 @@ public:
auto f = static_cast<BoxedFrame*>(b);
v->visit(f->_code);
v->visit(f->_globals);
v->visit(&f->_code);
v->visit(&f->_globals);
}
static void simpleDestructor(Box* b) {
......
......@@ -427,27 +427,27 @@ void BoxedGenerator::gcHandler(GCVisitor* v, Box* b) {
BoxedGenerator* g = (BoxedGenerator*)b;
v->visit(g->function);
v->visit(&g->function);
int num_args = g->function->f->numReceivedArgs();
if (num_args >= 1)
v->visit(g->arg1);
v->visit(&g->arg1);
if (num_args >= 2)
v->visit(g->arg2);
v->visit(&g->arg2);
if (num_args >= 3)
v->visit(g->arg3);
v->visit(&g->arg3);
if (g->args)
v->visit(g->args);
v->visit(&g->args);
if (num_args > 3)
v->visitPotentialRange(reinterpret_cast<void* const*>(&g->args->elts[0]),
reinterpret_cast<void* const*>(&g->args->elts[num_args - 3]));
v->visitPotentialRange(reinterpret_cast<void**>(&g->args->elts[0]),
reinterpret_cast<void**>(&g->args->elts[num_args - 3]));
if (g->returnValue)
v->visit(g->returnValue);
v->visit(&g->returnValue);
if (g->exception.type)
v->visit(g->exception.type);
v->visit(&g->exception.type);
if (g->exception.value)
v->visit(g->exception.value);
v->visit(&g->exception.value);
if (g->exception.traceback)
v->visit(g->exception.traceback);
v->visit(&g->exception.traceback);
if (g->running) {
v->visitPotentialRange((void**)g->returnContext,
......
......@@ -29,24 +29,25 @@ namespace pyston {
void HiddenClass::gc_visit(GCVisitor* visitor) {
// Visit children even for the dict-backed case, since children will just be empty
visitor->visitRange((void* const*)&children.vector()[0], (void* const*)&children.vector()[children.size()]);
visitor->visit(attrwrapper_child);
visitor->visitRange(const_cast<HiddenClass**>(&children.vector()[0]),
const_cast<HiddenClass**>(&children.vector()[children.size()]));
visitor->visit(&attrwrapper_child);
if (children.empty()) {
for (auto p : attr_offsets)
visitor->visit(p.first);
visitor->visit(&p.first);
} else {
#if MOVING_GC
// If we have any children, the attr_offsets map will be a subset of the child's map.
for (const auto& p : attr_offsets)
visitor->visitRedundant(p.first);
visitor->visitRedundant(const_cast<BoxedString**>(&p.first));
#endif
}
#if MOVING_GC
// The keys of the 'children' map should also appear as entries in each child's attr_offsets map.
for (const auto& p : children) {
visitor->visitRedundant(p.first);
visitor->visitRedundant(const_cast<BoxedString**>(&p.first));
}
#endif
}
......
......@@ -134,7 +134,7 @@ public:
Box::gcHandler(v, b);
BoxedXrangeIterator* it = (BoxedXrangeIterator*)b;
v->visit(it->xrange);
v->visit(const_cast<BoxedXrange**>(&it->xrange));
}
};
......
......@@ -65,9 +65,9 @@ public:
void gc_visit(GCVisitor* v) override {
if (iterator)
v->visit(iterator);
v->visit(&iterator);
if (value)
v->visit(value);
v->visit(&value);
}
};
......@@ -118,7 +118,7 @@ public:
void gc_visit(GCVisitor* v) override {
if (obj)
v->visit(obj);
v->visit(&obj);
}
};
}
......
......@@ -116,9 +116,9 @@ void BoxedSeqIter::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedSeqIter* si = static_cast<BoxedSeqIter*>(b);
v->visit(si->b);
v->visit(&si->b);
if (si->next)
v->visit(si->next);
v->visit(&si->next);
}
void BoxedIterWrapper::gcHandler(GCVisitor* v, Box* b) {
......@@ -126,9 +126,9 @@ void BoxedIterWrapper::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedIterWrapper* iw = static_cast<BoxedIterWrapper*>(b);
v->visit(iw->iter);
v->visit(&iw->iter);
if (iw->next)
v->visit(iw->next);
v->visit(&iw->next);
}
bool iterwrapperHasnextUnboxed(Box* s) {
......
......@@ -1179,7 +1179,7 @@ extern "C" int PyList_SetSlice(PyObject* a, Py_ssize_t ilow, Py_ssize_t ihigh, P
void BoxedListIterator::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedListIterator* it = (BoxedListIterator*)b;
v->visit(it->l);
v->visit(&it->l);
}
void BoxedList::gcHandler(GCVisitor* v, Box* b) {
......@@ -1192,9 +1192,9 @@ void BoxedList::gcHandler(GCVisitor* v, Box* b) {
int capacity = l->capacity;
assert(capacity >= size);
if (capacity)
v->visit(l->elts);
v->visit(&l->elts);
if (size)
v->visitRange((void**)&l->elts->elts[0], (void**)&l->elts->elts[size]);
v->visitRange(&l->elts->elts[0], &l->elts->elts[size]);
}
void setupList() {
......
......@@ -29,7 +29,7 @@ void BoxedSet::gcHandler(GCVisitor* v, Box* b) {
BoxedSet* s = (BoxedSet*)b;
for (auto&& p : s->s) {
v->visit(p.value);
v->visit(&p.value);
}
}
......@@ -57,7 +57,7 @@ public:
BoxedSetIterator* it = (BoxedSetIterator*)b;
v->visit(it->s);
v->visit(&it->s);
}
};
......
......@@ -2430,7 +2430,7 @@ public:
static void gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedStringIterator* it = (BoxedStringIterator*)b;
v->visit(it->s);
v->visit(&it->s);
}
};
......
......@@ -42,11 +42,11 @@ public:
Box::gcHandler(v, o);
if (o->type)
v->visit(o->type);
v->visit(&o->type);
if (o->obj)
v->visit(o->obj);
v->visit(&o->obj);
if (o->obj_type)
v->visit(o->obj_type);
v->visit(&o->obj_type);
}
};
......
......@@ -40,12 +40,12 @@ void BoxedTraceback::gcHandler(GCVisitor* v, Box* b) {
BoxedTraceback* self = static_cast<BoxedTraceback*>(b);
if (self->py_lines)
v->visit(self->py_lines);
v->visit(&self->py_lines);
if (self->tb_next)
v->visit(self->tb_next);
v->visit(&self->tb_next);
v->visit(self->line.file);
v->visit(self->line.func);
v->visit(&self->line.file);
v->visit(&self->line.func);
Box::gcHandler(v, b);
}
......
......@@ -564,13 +564,13 @@ void BoxedTuple::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedTuple* t = (BoxedTuple*)b;
v->visitRange((void* const*)&t->elts[0], (void* const*)&t->elts[t->size()]);
v->visitRange(&t->elts[0], &t->elts[t->size()]);
}
extern "C" void BoxedTupleIterator::gcHandler(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
BoxedTupleIterator* it = (BoxedTupleIterator*)b;
v->visit(it->t);
v->visit(&it->t);
}
void setupTuple() {
......
......@@ -96,11 +96,11 @@ bool IN_SHUTDOWN = false;
std::vector<BoxedClass*> exception_types;
void FrameInfo::gcVisit(GCVisitor* visitor) {
visitor->visit(boxedLocals);
visitor->visit(exc.traceback);
visitor->visit(exc.type);
visitor->visit(exc.value);
visitor->visit(frame_obj);
visitor->visit(&boxedLocals);
visitor->visit(&exc.traceback);
visitor->visit(&exc.type);
visitor->visit(&exc.value);
visitor->visit(&frame_obj);
}
// Analogue of PyType_GenericAlloc (default tp_alloc), but should only be used for Pyston classes!
......@@ -328,14 +328,14 @@ Box* Box::hasnextOrNullIC() {
void Box::gcHandler(GCVisitor* v, Box* b) {
if (b->cls) {
v->visit(b->cls);
v->visit(&b->cls);
if (b->cls->instancesHaveHCAttrs()) {
HCAttrs* attrs = b->getHCAttrsPtr();
v->visit(attrs->hcls);
v->visit(&attrs->hcls);
if (attrs->attr_list)
v->visit(attrs->attr_list);
v->visit(&attrs->attr_list);
}
if (b->cls->instancesHaveDictAttrs()) {
......@@ -346,7 +346,7 @@ void Box::gcHandler(GCVisitor* v, Box* b) {
BoxedHeapClass* heap_cls = static_cast<BoxedHeapClass*>(b->cls);
BoxedHeapClass::SlotOffset* slotOffsets = heap_cls->slotOffsets();
for (int i = 0; i < heap_cls->nslots(); i++) {
v->visit(*((Box**)((char*)b + slotOffsets[i])));
v->visit(&*((Box**)((char*)b + slotOffsets[i])));
}
}
} else {
......@@ -437,28 +437,28 @@ void BoxedFunction::gcHandler(GCVisitor* v, Box* b) {
// TODO eventually f->name should always be non-NULL, then there'd be no need for this check
if (f->name)
v->visit(f->name);
v->visit(&f->name);
if (f->modname)
v->visit(f->modname);
v->visit(&f->modname);
if (f->doc)
v->visit(f->doc);
v->visit(&f->doc);
if (f->closure)
v->visit(f->closure);
v->visit(&f->closure);
if (f->globals)
v->visit(f->globals);
v->visit(&f->globals);
// It's ok for f->defaults to be NULL here even if f->ndefaults isn't,
// since we could be collecting from inside a BoxedFunctionBase constructor
if (f->ndefaults) {
assert(f->defaults);
v->visit(f->defaults);
v->visit(&f->defaults);
// do a conservative scan since there can be NULLs in there:
v->visitPotentialRange(reinterpret_cast<void* const*>(&f->defaults->elts[0]),
reinterpret_cast<void* const*>(&f->defaults->elts[f->ndefaults]));
v->visitPotentialRange(reinterpret_cast<void**>(&f->defaults->elts[0]),
reinterpret_cast<void**>(&f->defaults->elts[f->ndefaults]));
}
}
......@@ -559,7 +559,7 @@ Box* BoxedModule::getLongConstant(llvm::StringRef ast_str) {
}
template <typename A, typename B, typename C> void visitContiguousMap(GCVisitor* v, ContiguousMap<A, B, C>& map) {
v->visitRange((void* const*)&map.vector()[0], (void* const*)&map.vector()[map.size()]);
v->visitRange(const_cast<B*>(&map.vector()[0]), const_cast<B*>(&map.vector()[map.size()]));
}
void BoxedModule::gcHandler(GCVisitor* v, Box* b) {
......@@ -574,7 +574,7 @@ void BoxedModule::gcHandler(GCVisitor* v, Box* b) {
visitContiguousMap(v, d->imaginary_constants);
visitContiguousMap(v, d->long_constants);
if (!d->keep_alive.empty())
v->visitRange((void**)&d->keep_alive[0], (void**)((&d->keep_alive[0]) + d->keep_alive.size()));
v->visitRange(&d->keep_alive[0], ((&d->keep_alive[0]) + d->keep_alive.size()));
}
// This mustn't throw; our IR generator generates calls to it without "invoke" even when there are exception handlers /
......@@ -1365,20 +1365,20 @@ void BoxedHeapClass::gcHandler(GCVisitor* v, Box* b) {
BoxedClass* cls = (BoxedClass*)b;
if (cls->tp_base)
v->visit(cls->tp_base);
v->visit(&cls->tp_base);
if (cls->tp_dict)
v->visit(cls->tp_dict);
v->visit(&cls->tp_dict);
if (cls->tp_mro)
v->visit(cls->tp_mro);
v->visit(&cls->tp_mro);
if (cls->tp_bases)
v->visit(cls->tp_bases);
v->visit(&cls->tp_bases);
if (cls->tp_subclasses)
v->visit(cls->tp_subclasses);
v->visit(&cls->tp_subclasses);
if (cls->tp_flags & Py_TPFLAGS_HEAPTYPE) {
BoxedHeapClass* hcls = static_cast<BoxedHeapClass*>(cls);
assert(hcls->ht_name);
v->visit(hcls->ht_name);
v->visit(&hcls->ht_name);
}
}
......@@ -1428,9 +1428,9 @@ void BoxedInstanceMethod::gcHandler(GCVisitor* v, Box* b) {
BoxedInstanceMethod* im = (BoxedInstanceMethod*)b;
v->visit(im->obj);
v->visit(im->func);
v->visit(im->im_class);
v->visit(&im->obj);
v->visit(&im->func);
v->visit(&im->im_class);
}
void BoxedProperty::gcHandler(GCVisitor* v, Box* b) {
......@@ -1439,13 +1439,13 @@ void BoxedProperty::gcHandler(GCVisitor* v, Box* b) {
BoxedProperty* prop = (BoxedProperty*)b;
if (prop->prop_get)
v->visit(prop->prop_get);
v->visit(&prop->prop_get);
if (prop->prop_set)
v->visit(prop->prop_set);
v->visit(&prop->prop_set);
if (prop->prop_del)
v->visit(prop->prop_del);
v->visit(&prop->prop_del);
if (prop->prop_doc)
v->visit(prop->prop_doc);
v->visit(&prop->prop_doc);
}
void BoxedStaticmethod::gcHandler(GCVisitor* v, Box* b) {
......@@ -1454,7 +1454,7 @@ void BoxedStaticmethod::gcHandler(GCVisitor* v, Box* b) {
BoxedStaticmethod* sm = (BoxedStaticmethod*)b;
if (sm->sm_callable)
v->visit(sm->sm_callable);
v->visit(&sm->sm_callable);
}
void BoxedClassmethod::gcHandler(GCVisitor* v, Box* b) {
......@@ -1463,7 +1463,7 @@ void BoxedClassmethod::gcHandler(GCVisitor* v, Box* b) {
BoxedClassmethod* cm = (BoxedClassmethod*)b;
if (cm->cm_callable)
v->visit(cm->cm_callable);
v->visit(&cm->cm_callable);
}
void BoxedSlice::gcHandler(GCVisitor* v, Box* b) {
......@@ -1473,15 +1473,15 @@ void BoxedSlice::gcHandler(GCVisitor* v, Box* b) {
BoxedSlice* sl = static_cast<BoxedSlice*>(b);
v->visit(sl->start);
v->visit(sl->stop);
v->visit(sl->step);
v->visit(&sl->start);
v->visit(&sl->stop);
v->visit(&sl->step);
}
static int call_gc_visit(PyObject* val, void* arg) {
if (val) {
GCVisitor* v = static_cast<GCVisitor*>(arg);
v->visit(val);
v->visit(&val);
}
return 0;
}
......@@ -1500,11 +1500,11 @@ void BoxedClosure::gcHandler(GCVisitor* v, Box* b) {
BoxedClosure* c = (BoxedClosure*)b;
if (c->parent)
v->visit(c->parent);
v->visit(&c->parent);
for (int i = 0; i < c->nelts; i++) {
if (c->elts[i])
v->visit(c->elts[i]);
v->visit(&c->elts[i]);
}
}
......@@ -2193,7 +2193,7 @@ public:
Box::gcHandler(v, b);
AttrWrapperIter* self = (AttrWrapperIter*)b;
v->visit(self->hcls);
v->visit(&self->hcls);
}
static Box* hasnext(Box* _self);
......@@ -2228,7 +2228,7 @@ public:
Box::gcHandler(v, b);
AttrWrapper* aw = (AttrWrapper*)b;
v->visit(aw->b);
v->visit(&aw->b);
}
static Box* setitem(Box* _self, Box* _key, Box* value) {
......@@ -3258,8 +3258,8 @@ void unicode_visit(GCVisitor* v, Box* b) {
Box::gcHandler(v, b);
PyUnicodeObject* u = (PyUnicodeObject*)b;
v->visit(u->str);
v->visit(u->defenc);
v->visit(&u->str);
v->visit(&u->defenc);
}
extern "C" PyUnicodeObject* unicode_empty;
......
......@@ -6,7 +6,7 @@ class C(object):
def f(self, i):
return i * i
def __getitem__(self, i):
if i < 1000:
if i < 200:
return self.f(i)
raise IndexError(i)
......@@ -25,7 +25,7 @@ class C2(object):
self.n += 1
return self.n * 2
def next(self):
if self.n < 1000:
if self.n < 200:
return self.f()
raise StopIteration()
......
......@@ -17,7 +17,7 @@ def call_function_far_up_the_stack(fn, num_calls_left=200):
# It's useful to call the GC at different locations in the stack in case it's the
# call to the GC itself that left a lingering pointer (e.g. the pointer could be the
# __del__ attribute of an object we'd like to collect).
def call_gc_throughout_the_stack(number_of_gc_calls, num_calls_left=100):
def call_gc_throughout_the_stack(number_of_gc_calls, num_calls_left=30):
if num_calls_left > 0:
call_gc_throughout_the_stack(number_of_gc_calls, num_calls_left - 1)
if number_of_gc_calls >= num_calls_left:
......
......@@ -31,7 +31,7 @@ def p(x):
return [hex(ord(i)) for i in x]
s = u"\u20AC" # euro sign
print p(u"\N{EURO SIGN}")
print p(s)
print p(s)
print p(s.encode("utf8"))
print p(s.encode("utf16"))
print p(s.encode("utf32"))
......@@ -51,7 +51,7 @@ for i in xrange(100):
print repr(BaseException().__unicode__())
gc.collect()
# do some allocations:
for j in xrange(100):
for j in xrange(101):
[None] * j
print u'' in ''
......