Commit c8bf02a1 authored by Mark Florisson's avatar Mark Florisson

Cleanup object temporaries in parallel contexts from with gil blocks

parent 630d0edd
@@ -6935,6 +6935,7 @@ class ParallelStatNode(StatNode, ParallelNode):
     body = None
     is_prange = False
+    is_nested_prange = False

     error_label_used = False
@@ -7250,15 +7251,16 @@ class ParallelStatNode(StatNode, ParallelNode):
             c.put(" shared(%s)" % ', '.join(shared_vars))

-    def cleanup_slice_temps(self, code):
-        # Now clean up any memoryview slice temporaries
-        first = True
-        for temp, type in self.temps:
-            if type.is_memoryviewslice:
-                if first:
-                    first = False
-                    code.putln("/* Clean up any temporary slices */")
-                code.put_xdecref_memoryviewslice(temp, have_gil=False)
+    def cleanup_temps(self, code):
+        # Now clean up any memoryview slice and object temporaries
+        if self.is_parallel and not self.is_nested_prange:
+            code.putln("/* Clean up any temporaries */")
+            for temp, type in self.temps:
+                if type.is_memoryviewslice:
+                    code.put_xdecref_memoryviewslice(temp, have_gil=False)
+                elif type.is_pyobject:
+                    code.put_xdecref(temp, type)
+                    code.putln("%s = NULL;" % temp)

     def setup_parallel_control_flow_block(self, code):
         """
@@ -7306,7 +7308,15 @@ class ParallelStatNode(StatNode, ParallelNode):
         self.begin_of_parallel_block = code.insertion_point()

     def end_parallel_block(self, code):
-        "Acquire the GIL, deallocate threadstate, release"
+        """
+        To ensure all OpenMP threads have thread states, we ensure the GIL
+        in each thread (which creates a thread state if it doesn't exist),
+        after which we release the GIL.
+        On exit, reacquire the GIL and release the thread state.
+
+        If compiled without OpenMP support (at the C level), then we still have
+        to acquire the GIL to decref any object temporaries.
+        """
         if self.error_label_used:
             begin_code = self.begin_of_parallel_block
             end_code = code
@@ -7318,11 +7328,16 @@ class ParallelStatNode(StatNode, ParallelNode):
         end_code.putln("#ifdef _OPENMP")
         end_code.putln("Py_END_ALLOW_THREADS")
+        end_code.putln("#else")
+        end_code.put_safe("{\n")
+        end_code.put_ensure_gil()
+        end_code.putln("#endif /* _OPENMP */")
+
+        self.cleanup_temps(end_code)
+
         end_code.put_release_ensured_gil()
+        end_code.putln("#ifndef _OPENMP")
+        end_code.put_safe("}\n")
         end_code.putln("#endif /* _OPENMP */")
-        self.cleanup_slice_temps(code)
     def trap_parallel_exit(self, code, should_flush=False):
         """
         Trap any kind of return inside a parallel construct. 'should_flush'
@@ -7643,7 +7658,6 @@ class ParallelRangeNode(ParallelStatNode):
     start = stop = step = None
     is_prange = True
-    is_nested_prange = False

     nogil = None
     schedule = None
...
@@ -681,3 +681,26 @@ def test_chunksize():
             sum += i
     print sum
cdef class PrintOnDealloc(object):
    """Extension type that prints a marker on deallocation, so the doctest
    in test_clean_temps() can observe that the object temporary was decref'd."""
    def __dealloc__(self):
        print "deallocating..."
def error():
    # Always raises, so the binary-op expression in test_clean_temps() fails
    # *after* the PrintOnDealloc() temporary has already been created.
    raise Exception("propagate me")
def test_clean_temps():
    """
    >>> test_clean_temps()
    deallocating...
    propagate me
    """
    cdef Py_ssize_t i
    try:
        for i in prange(100, nogil=True, num_threads=1):
            with gil:
                # error() raises after PrintOnDealloc() is constructed; the
                # object temporary must still be decref'd ("deallocating...")
                # while the exception propagates out of the parallel block.
                x = PrintOnDealloc() + error()
    except Exception, e:
        # The exception raised inside the with-gil block must reach here.
        print e.args[0]
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment