Commit 616dd7bf authored by Kevin Modzelewski, committed by GitHub

Merge pull request #1252 from kmod/telnetlib

Fix some leaks related to signals
parents 31a515b5 5466b67e
......@@ -78,7 +78,7 @@ before_script:
script:
- ccache -z
- ninja -j4 pyston check-deps && PYSTON_RUN_ARGS=G travis_wait 60 ctest --output-on-failure
- time ninja -j4 pyston check-deps && PYSTON_RUN_ARGS=G travis_wait 60 ctest --output-on-failure
- ccache -s
- if [ -n "$(git status --porcelain --untracked=no)" ]; then echo "test suite left the source directory dirty"; git status; false; fi
......
......@@ -145,6 +145,16 @@ PyAPI_FUNC(PyObject*) _PyGC_GetGarbage(void) PYSTON_NOEXCEPT;
PyAPI_FUNC(void) PyGC_Enable(void) PYSTON_NOEXCEPT;
PyAPI_FUNC(void) PyGC_Disable(void) PYSTON_NOEXCEPT;
#ifdef Py_TRACE_REFS
// This function is a semi-smart leak finder. Using the cycle-collector
// infrastructure, it will find all non-heap references remaining. This is
// an improvement over calling _Py_PrintReferenceAddresses, since this will
// automatically filter out any objects that are only indirectly leaked.
//
// This will destroy the heap, so it has to be the last thing called.
PyAPI_FUNC(void) _PyGC_FindLeaks(void) PYSTON_NOEXCEPT;
#endif
#ifdef __cplusplus
}
#endif
......
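To make the "filter out objects that are only indirectly leaked" claim above concrete, here is a minimal, purely hypothetical sketch (the helper name and the specific objects are made up): one object is leaked directly, and a second is reachable only through it.

#include <Python.h>

// Hypothetical illustration of what the leak finder ends up reporting.
static void make_example_leak(void) {
    PyObject* inner = PyList_New(0);
    PyObject* outer = PyList_New(0);
    PyList_Append(outer, inner);   // outer now holds its own reference to inner
    Py_DECREF(inner);              // inner is reachable only through outer
    // outer is never decref'd: it is the direct leak.
    // _Py_PrintReferenceAddresses would report both outer and inner;
    // _PyGC_FindLeaks subtracts the outer->inner edge via tp_traverse, so
    // inner's adjusted refcount drops to zero and only outer gets printed.
}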
......@@ -876,6 +876,54 @@ get_time(void)
return result;
}
#ifdef Py_TRACE_REFS
// Similar to visit_decref, but changed to operate on all objects, not just
// gc-tracked ones.
static int
visit_findleaks(PyObject *op, void *data) {
assert(op != NULL);
op->ob_refcnt--;
return 0;
}
extern PyObject refchain;
// Pyston addition. Mostly copied from collect() but stripped down a lot.
void
_PyGC_FindLeaks(void)
{
int i;
PyGC_Head *young; /* the generation we are examining */
int generation = NUM_GENERATIONS - 1;
/* merge younger generations with one we are currently collecting */
for (i = 0; i < generation; i++) {
gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
}
/* handy references */
young = GEN_HEAD(generation);
traverseproc traverse;
PyGC_Head *gc = young->gc.gc_next;
for (; gc != young; gc=gc->gc.gc_next) {
traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
(void) traverse(FROM_GC(gc),
(visitproc)visit_findleaks,
NULL);
}
PyObject* op;
fprintf(stderr, "Leaked references:\n");
for (op = refchain._ob_next; op != &refchain; op = op->_ob_next) {
if (op->ob_refcnt == 0)
continue;
fprintf(stderr, "%p [%" PY_FORMAT_SIZE_T "d] %s \033[40mwatch -l ((PyObject*)%p)->ob_refcnt\033[0m\n", op,
op->ob_refcnt, Py_TYPE(op)->tp_name, op);
}
}
#endif
/* This is the main function. Read this to understand how the
* collection process works. */
static Py_ssize_t
......
......@@ -1195,7 +1195,7 @@ extern "C" void _PyTrash_thread_destroy_chain() noexcept {
*/
extern "C" {
// static PyObject refchain = { &refchain, &refchain };
static PyObject refchain(Box::createRefchain());
PyObject refchain(Box::createRefchain());
}
/* Insert op at the front of the list of all objects. If force is true,
......
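Dropping `static` here gives refchain external linkage; the new leak finder in the gc hunk above reaches it through a plain extern declaration. A minimal sketch of the pairing (contextual comments added, no new behavior assumed):

// In the gc module (from the hunk above): walk the Py_TRACE_REFS list of all
// live objects.
extern PyObject refchain;

// In this file: the definition now has external linkage, so that extern
// declaration can resolve against it.
PyObject refchain(Box::createRefchain());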
......@@ -1012,6 +1012,8 @@ Value ASTInterpreter::visit_stmt(AST_stmt* node) {
// ignore those while interpreting.
if ((((AST_Expr*)node)->value)->type != AST_TYPE::Str) {
rtn = visit_expr((AST_Expr*)node);
Py_DECREF(rtn.o);
rtn = Value();
ASTInterpreterJitInterface::pendingCallsCheckHelper();
}
break;
......@@ -1028,8 +1030,13 @@ Value ASTInterpreter::visit_stmt(AST_stmt* node) {
break;
case AST_TYPE::Return:
rtn = visit_return((AST_Return*)node);
try {
ASTInterpreterJitInterface::pendingCallsCheckHelper();
break;
} catch (ExcInfo e) {
Py_DECREF(rtn.o);
throw e;
}
return rtn;
case AST_TYPE::Global:
rtn = visit_global((AST_Global*)node);
ASTInterpreterJitInterface::pendingCallsCheckHelper();
......@@ -1041,13 +1048,20 @@ Value ASTInterpreter::visit_stmt(AST_stmt* node) {
break;
case AST_TYPE::Jump:
rtn = visit_jump((AST_Jump*)node);
break;
return rtn;
case AST_TYPE::Invoke:
rtn = visit_invoke((AST_Invoke*)node);
break;
default:
RELEASE_ASSERT(0, "not implemented");
};
// This assertion tries to make sure that we are refcount-safe if an exception
// is thrown from pendingCallsCheckHelper. Any statement that returns a value needs
// to be careful to wrap pendingCallsCheckHelper, and it can signal that it was careful
// by returning from the function instead of breaking.
assert(!rtn.o);
return rtn;
}
......
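As a reading aid for the convention described in the new comment and assert at the bottom of visit_stmt, this is the pattern a value-producing case is expected to follow (the statement kind below is hypothetical; it simply mirrors the AST_TYPE::Return case above):

case AST_TYPE::SomeValueStmt:   // hypothetical statement kind, for illustration only
    rtn = visit_some_value_stmt((AST_SomeValueStmt*)node);
    try {
        // A signal can surface here as an exception; without the catch, the
        // reference held in rtn.o would be leaked.
        ASTInterpreterJitInterface::pendingCallsCheckHelper();
    } catch (ExcInfo e) {
        Py_DECREF(rtn.o);
        throw e;
    }
    // Return directly instead of breaking, so the trailing assert(!rtn.o)
    // only ever sees the cases that produced no value.
    return rtn;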
......@@ -373,10 +373,10 @@ public:
llvm_args.push_back(var->getValue());
llvm_args.push_back(converted_slice->getValue());
llvm::Value* uncasted
llvm::Instruction* uncasted
= emitter.createIC(pp, (void*)(target_exception_style == CAPI ? pyston::getitem_capi : pyston::getitem),
llvm_args, info.unw_info, target_exception_style, getNullPtr(g.llvm_value_type_ptr));
rtn = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
rtn = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
emitter.setType(rtn, RefType::OWNED);
} else {
rtn = emitter.createCall2(
......@@ -419,9 +419,9 @@ public:
// var has __iter__()
emitter.setCurrentBasicBlock(bb_has_iter);
ICSetupInfo* pp = createGenericIC(info.getTypeRecorder(), true, 128);
llvm::Value* uncasted = emitter.createIC(pp, (void*)pyston::createBoxedIterWrapperIfNeeded,
llvm::Instruction* uncasted = emitter.createIC(pp, (void*)pyston::createBoxedIterWrapperIfNeeded,
{ converted_iter_call->getValue() }, info.unw_info);
llvm::Value* value_has_iter = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
llvm::Value* value_has_iter = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
emitter.setType(value_has_iter, RefType::OWNED);
llvm::BasicBlock* value_has_iter_bb = emitter.currentBasicBlock();
auto has_iter_terminator = emitter.getBuilder()->CreateBr(bb_join);
......@@ -475,8 +475,8 @@ public:
llvm_args.push_back(converted_rhs->getValue());
llvm_args.push_back(getConstantInt(op_type, g.i32));
llvm::Value* uncasted = emitter.createIC(pp, rt_func_addr, llvm_args, info.unw_info);
rtn = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
llvm::Instruction* uncasted = emitter.createIC(pp, rt_func_addr, llvm_args, info.unw_info);
rtn = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
} else {
rtn = emitter.createCall3(info.unw_info, rt_func, var->getValue(), converted_rhs->getValue(),
getConstantInt(op_type, g.i32));
......@@ -562,9 +562,9 @@ CompilerVariable* UnknownType::getattr(IREmitter& emitter, const OpInfo& info, C
llvm_args.push_back(var->getValue());
llvm_args.push_back(ptr);
llvm::Value* uncasted = emitter.createIC(pp, raw_func, llvm_args, info.unw_info, target_exception_style,
llvm::Instruction* uncasted = emitter.createIC(pp, raw_func, llvm_args, info.unw_info, target_exception_style,
getNullPtr(g.llvm_value_type_ptr));
rtn_val = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
rtn_val = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
} else {
rtn_val = emitter.createCall2(info.unw_info, llvm_func, var->getValue(), ptr, target_exception_style,
getNullPtr(g.llvm_value_type_ptr));
......@@ -666,7 +666,7 @@ _call(IREmitter& emitter, const OpInfo& info, llvm::Value* func, ExceptionStyle
assert(llvm::cast<llvm::FunctionType>(llvm::cast<llvm::PointerType>(func->getType())->getElementType())
->getReturnType() == g.llvm_value_type_ptr);
rtn = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
rtn = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
} else {
// printf("\n");
// func->dump();
......@@ -792,8 +792,8 @@ ConcreteCompilerVariable* UnknownType::unaryop(IREmitter& emitter, const OpInfo&
llvm_args.push_back(converted->getValue());
llvm_args.push_back(getConstantInt(op_type, g.i32));
llvm::Value* uncasted = emitter.createIC(pp, (void*)pyston::unaryop, llvm_args, info.unw_info);
rtn = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
llvm::Instruction* uncasted = emitter.createIC(pp, (void*)pyston::unaryop, llvm_args, info.unw_info);
rtn = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
} else {
rtn = emitter.createCall2(info.unw_info, g.funcs.unaryop, converted->getValue(),
getConstantInt(op_type, g.i32));
......
......@@ -403,7 +403,8 @@ private:
if (exc_dest) {
builder.CreateInvoke(g.funcs.makePendingCalls, join_block, exc_dest);
} else {
builder.CreateCall(g.funcs.makePendingCalls);
auto call = builder.CreateCall(g.funcs.makePendingCalls);
irstate->getRefcounts()->setMayThrow(call);
builder.CreateBr(join_block);
}
}
......@@ -676,10 +677,10 @@ public:
llvm::Value* createDeopt(AST_stmt* current_stmt, AST_expr* node, llvm::Value* node_value) override {
ICSetupInfo* pp = createDeoptIC();
llvm::Value* v
llvm::Instruction* v
= createIC(pp, (void*)pyston::deopt, { embedRelocatablePtr(node, g.llvm_astexpr_type_ptr), node_value },
UnwindInfo(current_stmt, NULL, /* is_after_deopt*/ true));
llvm::Value* rtn = getBuilder()->CreateIntToPtr(v, g.llvm_value_type_ptr);
llvm::Value* rtn = createAfter<llvm::IntToPtrInst>(v, v, g.llvm_value_type_ptr, "");
setType(rtn, RefType::OWNED);
return rtn;
}
......@@ -1293,8 +1294,8 @@ private:
llvm_args.push_back(emitter.setType(embedRelocatablePtr(node->id.getBox(), g.llvm_boxedstring_type_ptr),
RefType::BORROWED));
llvm::Value* uncasted = emitter.createIC(pp, (void*)pyston::getGlobal, llvm_args, unw_info);
llvm::Value* r = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);
llvm::Instruction* uncasted = emitter.createIC(pp, (void*)pyston::getGlobal, llvm_args, unw_info);
llvm::Value* r = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
emitter.setType(r, RefType::OWNED);
return new ConcreteCompilerVariable(UNKNOWN, r);
} else {
......@@ -1432,9 +1433,9 @@ private:
ConcreteCompilerVariable* cvar = var->makeConverted(emitter, var->getBoxType());
std::vector<llvm::Value*> args{ cvar->getValue() };
llvm::Value* rtn = emitter.createCall(unw_info, g.funcs.repr, args);
emitter.setType(rtn, RefType::BORROWED); // Well, really it's owned, and we hand off the ref to the bitcast
rtn = emitter.getBuilder()->CreateBitCast(rtn, g.llvm_value_type_ptr);
llvm::Instruction* uncasted = emitter.createCall(unw_info, g.funcs.repr, args);
emitter.setType(uncasted, RefType::BORROWED); // Well, really it's owned, and we hand off the ref to the bitcast
auto rtn = createAfter<llvm::BitCastInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
emitter.setType(rtn, RefType::OWNED);
return new ConcreteCompilerVariable(STR, rtn);
......
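On the BORROWED/OWNED pair above, as I read it: g.funcs.repr returns one owned reference, and tagging the raw call result BORROWED while tagging the bitcast OWNED hands that single reference off to the cast result, so the refcounter does not emit an extra decref. An annotated restatement of the new lines:

llvm::Instruction* uncasted = emitter.createCall(unw_info, g.funcs.repr, args);
emitter.setType(uncasted, RefType::BORROWED);   // the one owned ref is accounted for on the cast below
auto rtn = createAfter<llvm::BitCastInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
emitter.setType(rtn, RefType::OWNED);           // callers see the cast value, which carries the ref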
......@@ -53,6 +53,26 @@ static int numPredecessors(llvm::BasicBlock* b) {
llvm::Value* RefcountTracker::setType(llvm::Value* v, RefType reftype) {
assert(!llvm::isa<llvm::UndefValue>(v));
// Force tracked cast expressions to be immediately after the thing they cast.
// Otherwise there is the opportunity for things to happen between them, which
// may cause the refcount state to be examined, and the setType() call will not
// be seen yet.
//
// We could relax this restriction by looking through the cast, or by requiring
// the caller to also call setType() on the uncasted value. This is a simpler
// fix for now though.
if (llvm::CastInst* cast = llvm::dyn_cast<llvm::CastInst>(v)) {
auto uncasted = cast->getOperand(0);
auto uncasted_inst = llvm::cast<llvm::Instruction>(uncasted);
auto uncasted_invoke = llvm::dyn_cast<llvm::InvokeInst>(uncasted_inst);
if (uncasted_invoke)
assert(uncasted_invoke->getNormalDest()->getFirstNonPHI() == cast
&& "Refcount-tracked casts must be immediately after the value they cast");
else
assert(uncasted_inst->getNextNode() == cast
&& "Refcount-tracked casts must be immediately after the value they cast");
}
auto& var = this->vars[v];
assert(var.reftype == reftype || var.reftype == RefType::UNKNOWN);
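This check is what drives the CreateIntToPtr -> createAfter rewrites in the irgen hunks earlier in this commit. Side by side, with identifiers taken from those hunks:

// Before: the cast was created at the builder's current insert point, so other
// instructions (or an invoke's block boundary) could sit between the call and
// the cast, and the refcount state could be examined before setType() ran.
//   llvm::Value* uncasted = emitter.createIC(pp, rt_func_addr, llvm_args, info.unw_info);
//   rtn = emitter.getBuilder()->CreateIntToPtr(uncasted, g.llvm_value_type_ptr);

// After: the cast is pinned immediately behind the instruction it casts (or at
// the invoke's normal destination), which is exactly what the assert enforces.
llvm::Instruction* uncasted = emitter.createIC(pp, rt_func_addr, llvm_args, info.unw_info);
rtn = createAfter<llvm::IntToPtrInst>(uncasted, uncasted, g.llvm_value_type_ptr, "");
emitter.setType(rtn, RefType::OWNED);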
......@@ -898,6 +918,7 @@ void RefcountTracker::addRefcounts(IRGenState* irstate) {
if (state.ending_refs[inst] != starting_refs) {
llvm::Instruction* insertion_pt = NULL;
llvm::BasicBlock* insertion_block = NULL, * insertion_from_block = NULL;
assert(inst != inst->getParent()->getTerminator());
insertion_pt = inst->getNextNode();
while (llvm::isa<llvm::PHINode>(insertion_pt)) {
insertion_pt = insertion_pt->getNextNode();
......
......@@ -19,6 +19,8 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
namespace llvm {
class Constant;
......@@ -46,6 +48,20 @@ const void* getValueOfRelocatableSym(const std::string& str);
void visitRelocatableSymsMap(gc::GCVisitor* visitor);
void dumpPrettyIR(llvm::Function* f);
// Insert an instruction at the first valid point *after* the given instruction.
// The non-triviality of this is that if the given instruction is an invoke, we have
// to be careful about where we place the new instruction -- this puts it on the
// normal-case destination.
//
// Note: I wish the `create_after` argument could be placed after the `Args... args` one.
// And I think that that should be valid, but clang doesn't seem to be accepting it.
template <typename T, typename... Args> T* createAfter(llvm::Instruction* create_after, Args... args) {
if (llvm::InvokeInst* ii = llvm::dyn_cast<llvm::InvokeInst>(create_after)) {
return new T(args..., ii->getNormalDest()->getFirstInsertionPt());
} else
return new T(args..., create_after->getNextNode());
}
}
#endif
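For reference, a sketch of how the template expands for the IntToPtr case used throughout this commit: the variadic arguments are forwarded to the instruction constructor, and the computed insertion point (the next node, or the invoke's normal-destination insertion point) becomes the trailing insert-before parameter that LLVM instruction constructors accept. The wrapper name castAfter and the producing_inst parameter below are placeholders.

// `producing_inst` stands in for whatever instruction produced the value
// (in this commit, the call/invoke returned by emitter.createIC(...)).
llvm::Value* castAfter(llvm::Instruction* producing_inst) {
    auto* cast = createAfter<llvm::IntToPtrInst>(producing_inst,        // instruction to insert after
                                                 producing_inst,        // ctor arg: value being cast
                                                 g.llvm_value_type_ptr, // ctor arg: destination type
                                                 "");                   // ctor arg: name
    // For the non-invoke case this expands to roughly:
    //   new llvm::IntToPtrInst(producing_inst, g.llvm_value_type_ptr, "",
    //                          producing_inst->getNextNode());
    return cast;
}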
......@@ -4741,7 +4741,7 @@ extern "C" void Py_Finalize() noexcept {
if (assert_refs) {
#ifdef Py_TRACE_REFS
if (_Py_RefTotal != 0)
_Py_PrintReferenceAddressesCapped(stderr, 10);
_PyGC_FindLeaks();
#endif
RELEASE_ASSERT(_Py_RefTotal == 0, "%ld refs remaining!", _Py_RefTotal);
......
import os
import signal
import threading
import time

def f():
    time.sleep(0.1)
    os.kill(os.getpid(), signal.SIGINT)

t = threading.Thread(target=f)
t.start()

def g():
    while True:
        -(0.2 ** 5)

try:
    g()
    assert 0
except KeyboardInterrupt:
    pass