Commit 84ed9a68 authored by Richard Oudkerk

Issue #8713: Support alternative start methods in multiprocessing on Unix.

See http://hg.python.org/sandbox/sbt#spawn
parent d06eeb4a
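
Before the file-by-file changes, here is a minimal usage sketch of the start-method API this commit adds (set_start_method(), get_start_method() and get_all_start_methods(), defined in multiprocessing/__init__.py further down); the worker function is illustrative only:

import multiprocessing

def worker():
    print('child started via:', multiprocessing.get_start_method())

if __name__ == '__main__':
    print('available methods:', multiprocessing.get_all_start_methods())
    multiprocessing.set_start_method('spawn')    # or 'fork' / 'forkserver'
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join()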
#
# Simple benchmarks for the multiprocessing package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

import time
import multiprocessing
import threading
import queue
import gc

_timer = time.perf_counter

delta = 1


#### TEST_QUEUESPEED

def queuespeed_func(q, c, iterations):
    a = '0' * 256
    c.acquire()
    c.notify()
    c.release()

    for i in range(iterations):
        q.put(a)

    q.put('STOP')

def test_queuespeed(Process, q, c):
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        p = Process(target=queuespeed_func, args=(q, c, iterations))
        c.acquire()
        p.start()
        c.wait()
        c.release()

        result = None
        t = _timer()

        while result != 'STOP':
            result = q.get()

        elapsed = _timer() - t

        p.join()

    print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)


#### TEST_PIPESPEED

def pipe_func(c, cond, iterations):
    a = '0' * 256
    cond.acquire()
    cond.notify()
    cond.release()

    for i in range(iterations):
        c.send(a)

    c.send('STOP')

def test_pipespeed():
    c, d = multiprocessing.Pipe()
    cond = multiprocessing.Condition()
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        p = multiprocessing.Process(target=pipe_func,
                                    args=(d, cond, iterations))

        cond.acquire()
        p.start()
        cond.wait()
        cond.release()

        result = None
        t = _timer()

        while result != 'STOP':
            result = c.recv()

        elapsed = _timer() - t
        p.join()

    print(iterations, 'objects passed through connection in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)


#### TEST_SEQSPEED

def test_seqspeed(seq):
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        t = _timer()

        for i in range(iterations):
            a = seq[5]

        elapsed = _timer() - t

    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)


#### TEST_LOCK

def test_lockspeed(l):
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        t = _timer()

        for i in range(iterations):
            l.acquire()
            l.release()

        elapsed = _timer() - t

    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)


#### TEST_CONDITION

def conditionspeed_func(c, N):
    c.acquire()
    c.notify()

    for i in range(N):
        c.wait()
        c.notify()

    c.release()

def test_conditionspeed(Process, c):
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        c.acquire()
        p = Process(target=conditionspeed_func, args=(c, iterations))
        p.start()

        c.wait()

        t = _timer()

        for i in range(iterations):
            c.notify()
            c.wait()

        elapsed = _timer() - t

        c.release()
        p.join()

    print(iterations * 2, 'waits in', elapsed, 'seconds')
    print('average number/sec:', iterations * 2 / elapsed)


####

def test():
    manager = multiprocessing.Manager()

    gc.disable()

    print('\n\t######## testing Queue.Queue\n')
    test_queuespeed(threading.Thread, queue.Queue(),
                    threading.Condition())
    print('\n\t######## testing multiprocessing.Queue\n')
    test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
                    multiprocessing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(multiprocessing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing multiprocessing.Pipe\n')
    test_pipespeed()

    print()

    print('\n\t######## testing list\n')
    test_seqspeed(list(range(10)))
    print('\n\t######## testing list managed by server process\n')
    test_seqspeed(manager.list(list(range(10))))
    print('\n\t######## testing Array("i", ..., lock=False)\n')
    test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=False))
    print('\n\t######## testing Array("i", ..., lock=True)\n')
    test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=True))

    print()

    print('\n\t######## testing threading.Lock\n')
    test_lockspeed(threading.Lock())
    print('\n\t######## testing threading.RLock\n')
    test_lockspeed(threading.RLock())
    print('\n\t######## testing multiprocessing.Lock\n')
    test_lockspeed(multiprocessing.Lock())
    print('\n\t######## testing multiprocessing.RLock\n')
    test_lockspeed(multiprocessing.RLock())
    print('\n\t######## testing lock managed by server process\n')
    test_lockspeed(manager.Lock())
    print('\n\t######## testing rlock managed by server process\n')
    test_lockspeed(manager.RLock())

    print()

    print('\n\t######## testing threading.Condition\n')
    test_conditionspeed(threading.Thread, threading.Condition())
    print('\n\t######## testing multiprocessing.Condition\n')
    test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
    print('\n\t######## testing condition managed by a server process\n')
    test_conditionspeed(multiprocessing.Process, manager.Condition())

    gc.enable()


if __name__ == '__main__':
    multiprocessing.freeze_support()
    test()
#
# This module shows how to use arbitrary callables with a subclass of
# `BaseManager`.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
 from multiprocessing import freeze_support
 from multiprocessing.managers import BaseManager, BaseProxy
 import operator

@@ -27,11 +19,9 @@ def baz():
 # Proxy type for generator objects
 class GeneratorProxy(BaseProxy):
-    _exposed_ = ('next', '__next__')
+    _exposed_ = ['__next__']
     def __iter__(self):
         return self
-    def __next__(self):
-        return self._callmethod('next')
     def __next__(self):
         return self._callmethod('__next__')

@@ -90,8 +80,6 @@ def test():
     op = manager.operator()
     print('op.add(23, 45) =', op.add(23, 45))
     print('op.pow(2, 94) =', op.pow(2, 94))
-    print('op.getslice(range(10), 2, 6) =', op.getslice(list(range(10)), 2, 6))
-    print('op.repeat(range(5), 3) =', op.repeat(list(range(5)), 3))
     print('op._exposed_ =', op._exposed_)

 ##
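
The portion of this example that the viewer truncates registers the proxy with a BaseManager subclass, so that a generator created in the manager's server process can be iterated through the proxy. A minimal sketch of that registration, assuming a trivial generator function (the names baz and MyManager follow the example file but are not visible in this hunk):

from multiprocessing.managers import BaseManager, BaseProxy

class GeneratorProxy(BaseProxy):
    _exposed_ = ['__next__']
    def __iter__(self):
        return self
    def __next__(self):
        return self._callmethod('__next__')

def baz():
    for i in range(10):
        yield i * i          # runs inside the manager's server process

class MyManager(BaseManager):
    pass

# proxytype= makes the manager return an iterable proxy for the generator
MyManager.register('baz', baz, proxytype=GeneratorProxy)

if __name__ == '__main__':
    with MyManager() as manager:
        for x in manager.baz():
            print(x)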
#
# A test of `multiprocessing.Pool` class
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
 import multiprocessing
 import time
 import random

@@ -46,269 +39,115 @@ def noop(x):
 #

 def test():
-    print('cpu_count() = %d\n' % multiprocessing.cpu_count())
-
-    #
-    # Create pool
-    #
-
     PROCESSES = 4
     print('Creating pool with %d processes\n' % PROCESSES)

-    pool = multiprocessing.Pool(PROCESSES)
-    print('pool = %s' % pool)
-    print()
-
-    #
-    # Tests
-    #
-
-    TASKS = [(mul, (i, 7)) for i in range(10)] + \
-            [(plus, (i, 8)) for i in range(10)]
-
-    results = [pool.apply_async(calculate, t) for t in TASKS]
-    imap_it = pool.imap(calculatestar, TASKS)
-    imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
-
-    print('Ordered results using pool.apply_async():')
-    for r in results:
-        print('\t', r.get())
-    print()
-
-    print('Ordered results using pool.imap():')
-    for x in imap_it:
-        print('\t', x)
-    print()
-
-    print('Unordered results using pool.imap_unordered():')
-    for x in imap_unordered_it:
-        print('\t', x)
-    print()
-
-    print('Ordered results using pool.map() --- will block till complete:')
-    for x in pool.map(calculatestar, TASKS):
-        print('\t', x)
-    print()
-
-    #
-    # Simple benchmarks
-    #
-
-    N = 100000
-    print('def pow3(x): return x**3')
-
-    t = time.time()
-    A = list(map(pow3, range(N)))
-    print('\tmap(pow3, range(%d)):\n\t\t%s seconds' % \
-          (N, time.time() - t))
-
-    t = time.time()
-    B = pool.map(pow3, range(N))
-    print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' % \
-          (N, time.time() - t))
-
-    t = time.time()
-    C = list(pool.imap(pow3, range(N), chunksize=N//8))
-    print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s' \
-          ' seconds' % (N, N//8, time.time() - t))
-
-    assert A == B == C, (len(A), len(B), len(C))
-    print()
-
-    L = [None] * 1000000
-    print('def noop(x): pass')
-    print('L = [None] * 1000000')
-
-    t = time.time()
-    A = list(map(noop, L))
-    print('\tmap(noop, L):\n\t\t%s seconds' % \
-          (time.time() - t))
-
-    t = time.time()
-    B = pool.map(noop, L)
-    print('\tpool.map(noop, L):\n\t\t%s seconds' % \
-          (time.time() - t))
-
-    t = time.time()
-    C = list(pool.imap(noop, L, chunksize=len(L)//8))
-    print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
-          (len(L)//8, time.time() - t))
-
-    assert A == B == C, (len(A), len(B), len(C))
-    print()
-    del A, B, C, L
-
-    #
-    # Test error handling
-    #
-
-    print('Testing error handling:')
-
-    try:
-        print(pool.apply(f, (5,)))
-    except ZeroDivisionError:
-        print('\tGot ZeroDivisionError as expected from pool.apply()')
-    else:
-        raise AssertionError('expected ZeroDivisionError')
-
-    try:
-        print(pool.map(f, list(range(10))))
-    except ZeroDivisionError:
-        print('\tGot ZeroDivisionError as expected from pool.map()')
-    else:
-        raise AssertionError('expected ZeroDivisionError')
-
-    try:
-        print(list(pool.imap(f, list(range(10)))))
-    except ZeroDivisionError:
-        print('\tGot ZeroDivisionError as expected from list(pool.imap())')
-    else:
-        raise AssertionError('expected ZeroDivisionError')
-
-    it = pool.imap(f, list(range(10)))
-    for i in range(10):
-        try:
-            x = next(it)
-        except ZeroDivisionError:
-            if i == 5:
-                pass
-        except StopIteration:
-            break
-        else:
-            if i == 5:
-                raise AssertionError('expected ZeroDivisionError')
-
-    assert i == 9
-    print('\tGot ZeroDivisionError as expected from IMapIterator.next()')
-    print()
-
-    #
-    # Testing timeouts
-    #
-
-    print('Testing ApplyResult.get() with timeout:', end=' ')
-    res = pool.apply_async(calculate, TASKS[0])
-    while 1:
-        sys.stdout.flush()
-        try:
-            sys.stdout.write('\n\t%s' % res.get(0.02))
-            break
-        except multiprocessing.TimeoutError:
-            sys.stdout.write('.')
-    print()
-    print()
-
-    print('Testing IMapIterator.next() with timeout:', end=' ')
-    it = pool.imap(calculatestar, TASKS)
-    while 1:
-        sys.stdout.flush()
-        try:
-            sys.stdout.write('\n\t%s' % it.next(0.02))
-        except StopIteration:
-            break
-        except multiprocessing.TimeoutError:
-            sys.stdout.write('.')
-    print()
-    print()
-
-    #
-    # Testing callback
-    #
-
-    print('Testing callback:')
-
-    A = []
-    B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
-
-    r = pool.apply_async(mul, (7, 8), callback=A.append)
-    r.wait()
-
-    r = pool.map_async(pow3, list(range(10)), callback=A.extend)
-    r.wait()
-
-    if A == B:
-        print('\tcallbacks succeeded\n')
-    else:
-        print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B))
-
-    #
-    # Check there are no outstanding tasks
-    #
-
-    assert not pool._cache, 'cache = %r' % pool._cache
-
-    #
-    # Check close() methods
-    #
-
-    print('Testing close():')
-
-    for worker in pool._pool:
-        assert worker.is_alive()
-
-    result = pool.apply_async(time.sleep, [0.5])
-    pool.close()
-    pool.join()
-
-    assert result.get() is None
-
-    for worker in pool._pool:
-        assert not worker.is_alive()
-
-    print('\tclose() succeeded\n')
-
-    #
-    # Check terminate() method
-    #
-
-    print('Testing terminate():')
-
-    pool = multiprocessing.Pool(2)
-    DELTA = 0.1
-    ignore = pool.apply(pow3, [2])
-    results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
-    pool.terminate()
-    pool.join()
-
-    for worker in pool._pool:
-        assert not worker.is_alive()
-
-    print('\tterminate() succeeded\n')
-
-    #
-    # Check garbage collection
-    #
-
-    print('Testing garbage collection:')
-
-    pool = multiprocessing.Pool(2)
-    DELTA = 0.1
-    processes = pool._pool
-    ignore = pool.apply(pow3, [2])
-    results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
-
-    results = pool = None
-
-    time.sleep(DELTA * 2)
-
-    for worker in processes:
-        assert not worker.is_alive()
-
-    print('\tgarbage collection succeeded\n')
+    with multiprocessing.Pool(PROCESSES) as pool:
+        #
+        # Tests
+        #
+
+        TASKS = [(mul, (i, 7)) for i in range(10)] + \
+                [(plus, (i, 8)) for i in range(10)]
+
+        results = [pool.apply_async(calculate, t) for t in TASKS]
+        imap_it = pool.imap(calculatestar, TASKS)
+        imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
+
+        print('Ordered results using pool.apply_async():')
+        for r in results:
+            print('\t', r.get())
+        print()
+
+        print('Ordered results using pool.imap():')
+        for x in imap_it:
+            print('\t', x)
+        print()
+
+        print('Unordered results using pool.imap_unordered():')
+        for x in imap_unordered_it:
+            print('\t', x)
+        print()
+
+        print('Ordered results using pool.map() --- will block till complete:')
+        for x in pool.map(calculatestar, TASKS):
+            print('\t', x)
+        print()
+
+        #
+        # Test error handling
+        #
+
+        print('Testing error handling:')
+
+        try:
+            print(pool.apply(f, (5,)))
+        except ZeroDivisionError:
+            print('\tGot ZeroDivisionError as expected from pool.apply()')
+        else:
+            raise AssertionError('expected ZeroDivisionError')
+
+        try:
+            print(pool.map(f, list(range(10))))
+        except ZeroDivisionError:
+            print('\tGot ZeroDivisionError as expected from pool.map()')
+        else:
+            raise AssertionError('expected ZeroDivisionError')
+
+        try:
+            print(list(pool.imap(f, list(range(10)))))
+        except ZeroDivisionError:
+            print('\tGot ZeroDivisionError as expected from list(pool.imap())')
+        else:
+            raise AssertionError('expected ZeroDivisionError')
+
+        it = pool.imap(f, list(range(10)))
+        for i in range(10):
+            try:
+                x = next(it)
+            except ZeroDivisionError:
+                if i == 5:
+                    pass
+            except StopIteration:
+                break
+            else:
+                if i == 5:
+                    raise AssertionError('expected ZeroDivisionError')
+
+        assert i == 9
+        print('\tGot ZeroDivisionError as expected from IMapIterator.next()')
+        print()
+
+        #
+        # Testing timeouts
+        #
+
+        print('Testing ApplyResult.get() with timeout:', end=' ')
+        res = pool.apply_async(calculate, TASKS[0])
+        while 1:
+            sys.stdout.flush()
+            try:
+                sys.stdout.write('\n\t%s' % res.get(0.02))
+                break
+            except multiprocessing.TimeoutError:
+                sys.stdout.write('.')
+        print()
+        print()
+
+        print('Testing IMapIterator.next() with timeout:', end=' ')
+        it = pool.imap(calculatestar, TASKS)
+        while 1:
+            sys.stdout.flush()
+            try:
+                sys.stdout.write('\n\t%s' % it.next(0.02))
+            except StopIteration:
+                break
+            except multiprocessing.TimeoutError:
+                sys.stdout.write('.')
+        print()
+        print()


 if __name__ == '__main__':
     multiprocessing.freeze_support()
-
-    assert len(sys.argv) in (1, 2)
-
-    if len(sys.argv) == 1 or sys.argv[1] == 'processes':
-        print(' Using processes '.center(79, '-'))
-    elif sys.argv[1] == 'threads':
-        print(' Using threads '.center(79, '-'))
-        import multiprocessing.dummy as multiprocessing
-    else:
-        print('Usage:\n\t%s [processes | threads]' % sys.argv[0])
-        raise SystemExit(2)
-
     test()
#
# A test file for the `multiprocessing` package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

import time
import sys
import random
from queue import Empty

import multiprocessing               # may get overwritten


#### TEST_VALUE

def value_func(running, mutex):
    random.seed()
    time.sleep(random.random()*4)

    mutex.acquire()
    print('\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished')
    running.value -= 1
    mutex.release()

def test_value():
    TASKS = 10
    running = multiprocessing.Value('i', TASKS)
    mutex = multiprocessing.Lock()

    for i in range(TASKS):
        p = multiprocessing.Process(target=value_func, args=(running, mutex))
        p.start()

    while running.value > 0:
        time.sleep(0.08)
        mutex.acquire()
        print(running.value, end=' ')
        sys.stdout.flush()
        mutex.release()

    print()
    print('No more running processes')


#### TEST_QUEUE

def queue_func(queue):
    for i in range(30):
        time.sleep(0.5 * random.random())
        queue.put(i*i)
    queue.put('STOP')

def test_queue():
    q = multiprocessing.Queue()

    p = multiprocessing.Process(target=queue_func, args=(q,))
    p.start()

    o = None
    while o != 'STOP':
        try:
            o = q.get(timeout=0.3)
            print(o, end=' ')
            sys.stdout.flush()
        except Empty:
            print('TIMEOUT')

    print()


#### TEST_CONDITION

def condition_func(cond):
    cond.acquire()
    print('\t' + str(cond))
    time.sleep(2)
    print('\tchild is notifying')
    print('\t' + str(cond))
    cond.notify()
    cond.release()

def test_condition():
    cond = multiprocessing.Condition()

    p = multiprocessing.Process(target=condition_func, args=(cond,))
    print(cond)

    cond.acquire()
    print(cond)
    cond.acquire()
    print(cond)

    p.start()

    print('main is waiting')
    cond.wait()
    print('main has woken up')

    print(cond)
    cond.release()
    print(cond)
    cond.release()

    p.join()
    print(cond)


#### TEST_SEMAPHORE

def semaphore_func(sema, mutex, running):
    sema.acquire()

    mutex.acquire()
    running.value += 1
    print(running.value, 'tasks are running')
    mutex.release()

    random.seed()
    time.sleep(random.random()*2)

    mutex.acquire()
    running.value -= 1
    print('%s has finished' % multiprocessing.current_process())
    mutex.release()

    sema.release()

def test_semaphore():
    sema = multiprocessing.Semaphore(3)
    mutex = multiprocessing.RLock()
    running = multiprocessing.Value('i', 0)

    processes = [
        multiprocessing.Process(target=semaphore_func,
                                args=(sema, mutex, running))
        for i in range(10)
        ]

    for p in processes:
        p.start()

    for p in processes:
        p.join()


#### TEST_JOIN_TIMEOUT

def join_timeout_func():
    print('\tchild sleeping')
    time.sleep(5.5)
    print('\n\tchild terminating')

def test_join_timeout():
    p = multiprocessing.Process(target=join_timeout_func)
    p.start()

    print('waiting for process to finish')

    while 1:
        p.join(timeout=1)
        if not p.is_alive():
            break
        print('.', end=' ')
        sys.stdout.flush()


#### TEST_EVENT

def event_func(event):
    print('\t%r is waiting' % multiprocessing.current_process())
    event.wait()
    print('\t%r has woken up' % multiprocessing.current_process())

def test_event():
    event = multiprocessing.Event()

    processes = [multiprocessing.Process(target=event_func, args=(event,))
                 for i in range(5)]

    for p in processes:
        p.start()

    print('main is sleeping')
    time.sleep(2)

    print('main is setting event')
    event.set()

    for p in processes:
        p.join()


#### TEST_SHAREDVALUES

def sharedvalues_func(values, arrays, shared_values, shared_arrays):
    for i in range(len(values)):
        v = values[i][1]
        sv = shared_values[i].value
        assert v == sv

    for i in range(len(values)):
        a = arrays[i][1]
        sa = list(shared_arrays[i][:])
        assert a == sa

    print('Tests passed')

def test_sharedvalues():
    values = [
        ('i', 10),
        ('h', -2),
        ('d', 1.25)
        ]
    arrays = [
        ('i', list(range(100))),
        ('d', [0.25 * i for i in range(100)]),
        ('H', list(range(1000)))
        ]

    shared_values = [multiprocessing.Value(id, v) for id, v in values]
    shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays]

    p = multiprocessing.Process(
        target=sharedvalues_func,
        args=(values, arrays, shared_values, shared_arrays)
        )
    p.start()
    p.join()

    assert p.exitcode == 0


####

def test(namespace=multiprocessing):
    global multiprocessing

    multiprocessing = namespace

    for func in [test_value, test_queue, test_condition,
                 test_semaphore, test_join_timeout, test_event,
                 test_sharedvalues]:
        print('\n\t######## %s\n' % func.__name__)
        func()

    ignore = multiprocessing.active_children()      # cleanup any old processes
    if hasattr(multiprocessing, '_debug_info'):
        info = multiprocessing._debug_info()
        if info:
            print(info)
            raise ValueError('there should be no positive refcounts left')


if __name__ == '__main__':
    multiprocessing.freeze_support()

    assert len(sys.argv) in (1, 2)

    if len(sys.argv) == 1 or sys.argv[1] == 'processes':
        print(' Using processes '.center(79, '-'))
        namespace = multiprocessing
    elif sys.argv[1] == 'manager':
        print(' Using processes and a manager '.center(79, '-'))
        namespace = multiprocessing.Manager()
        namespace.Process = multiprocessing.Process
        namespace.current_process = multiprocessing.current_process
        namespace.active_children = multiprocessing.active_children
    elif sys.argv[1] == 'threads':
        print(' Using threads '.center(79, '-'))
        import multiprocessing.dummy as namespace
    else:
        print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0])
        raise SystemExit(2)

    test(namespace)
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

import os
import sys

from multiprocessing import Process, current_process, freeze_support
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler

if sys.platform == 'win32':
    import multiprocessing.reduction    # make sockets picklable/inheritable


def note(format, *args):
    sys.stderr.write('[%s]\t%s\n' % (current_process().name, format % args))


class RequestHandler(SimpleHTTPRequestHandler):
    # we override log_message() to show which process is handling the request
    def log_message(self, format, *args):
        note(format, *args)

def serve_forever(server):
    note('starting server')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass


def runpool(address, number_of_processes):
    # create a single server object -- children will each inherit a copy
    server = HTTPServer(address, RequestHandler)

    # create child processes to act as workers
    for i in range(number_of_processes - 1):
        Process(target=serve_forever, args=(server,)).start()

    # main process also acts as a worker
    serve_forever(server)


def test():
    DIR = os.path.join(os.path.dirname(__file__), '..')
    ADDRESS = ('localhost', 8000)
    NUMBER_OF_PROCESSES = 4

    print('Serving at http://%s:%d using %d worker processes' % \
          (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES))
    print('To exit press Ctrl-' + ['C', 'Break'][sys.platform == 'win32'])

    os.chdir(DIR)
    runpool(ADDRESS, NUMBER_OF_PROCESSES)


if __name__ == '__main__':
    freeze_support()
    test()
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were put on the
# input queue.  If it is important to get the results back in the
# original order then consider using `Pool.map()` or `Pool.imap()`
# (which will save on the amount of code needed anyway).
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#

import time
import random
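
A minimal sketch of that ordering trade-off (the square helper is illustrative, not part of this example file):

import multiprocessing

def square(x):
    return x * x

if __name__ == '__main__':
    with multiprocessing.Pool(4) as pool:
        # imap() yields results in task-submission order...
        print(list(pool.imap(square, range(8))))
        # ...while imap_unordered() yields them as workers finish,
        # so the order below may vary from run to run.
        print(list(pool.imap_unordered(square, range(8))))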
@@ -108,6 +108,8 @@ Significantly Improved Library Modules:

 * Single-dispatch generic functions (:pep:`443`)
 * SHA-3 (Keccak) support for :mod:`hashlib`.
 * TLSv1.1 and TLSv1.2 support for :mod:`ssl`.
+* :mod:`multiprocessing` now has option to avoid using :func:`os.fork`
+  on Unix (:issue:`8713`).

 Security improvements:

@@ -254,6 +256,17 @@ mmap objects can now be weakref'ed.

 (Contributed by Valerie Lambert in :issue:`4885`.)

+multiprocessing
+---------------
+
+On Unix two new *start methods* have been added for starting processes
+using :mod:`multiprocessing`.  These make the mixing of processes with
+threads more robust.  See :issue:`8713`.
+
+Also, except when using the old *fork* start method, child processes
+will no longer inherit unneeded handles/file descriptors from their parents.
+
 poplib
 ------
@@ -21,6 +21,8 @@ __all__ = [
     'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
     'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
     'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
+    'set_executable', 'set_start_method', 'get_start_method',
+    'get_all_start_methods', 'set_forkserver_preload'
     ]

 #
@@ -30,8 +32,14 @@ __all__ = [
 import os
 import sys

-from multiprocessing.process import Process, current_process, active_children
-from multiprocessing.util import SUBDEBUG, SUBWARNING
+from .process import Process, current_process, active_children
+
+#
+# XXX These should not really be documented or public.
+#
+
+SUBDEBUG = 5
+SUBWARNING = 25

 #
 # Alias for main module -- will be reset by bootstrapping child processes
@@ -56,8 +64,6 @@ class TimeoutError(ProcessError):
 class AuthenticationError(ProcessError):
     pass

-import _multiprocessing
-
 #
 # Definitions not depending on native semaphores
 #
@@ -69,7 +75,7 @@ def Manager():
     The managers methods such as `Lock()`, `Condition()` and `Queue()`
     can be used to create shared objects.
     '''
-    from multiprocessing.managers import SyncManager
+    from .managers import SyncManager
     m = SyncManager()
     m.start()
     return m
@@ -78,7 +84,7 @@ def Pipe(duplex=True):
     '''
     Returns two connection object connected by a pipe
     '''
-    from multiprocessing.connection import Pipe
+    from .connection import Pipe
     return Pipe(duplex)

 def cpu_count():
@@ -97,21 +103,21 @@ def freeze_support():
     If so then run code specified by commandline and exit.
     '''
     if sys.platform == 'win32' and getattr(sys, 'frozen', False):
-        from multiprocessing.forking import freeze_support
+        from .spawn import freeze_support
         freeze_support()

 def get_logger():
     '''
     Return package logger -- if it does not already exist then it is created
     '''
-    from multiprocessing.util import get_logger
+    from .util import get_logger
     return get_logger()

 def log_to_stderr(level=None):
     '''
     Turn on logging and add a handler which prints to stderr
     '''
-    from multiprocessing.util import log_to_stderr
+    from .util import log_to_stderr
     return log_to_stderr(level)

 def allow_connection_pickling():
@@ -120,7 +126,7 @@ def allow_connection_pickling():
     '''
     # This is undocumented.  In previous versions of multiprocessing
     # its only effect was to make socket objects inheritable on Windows.
-    import multiprocessing.connection
+    from . import connection

 #
 # Definitions depending on native semaphores
@@ -130,120 +136,151 @@ def Lock():
     '''
     Returns a non-recursive lock object
     '''
-    from multiprocessing.synchronize import Lock
+    from .synchronize import Lock
     return Lock()

 def RLock():
     '''
     Returns a recursive lock object
     '''
-    from multiprocessing.synchronize import RLock
+    from .synchronize import RLock
     return RLock()

 def Condition(lock=None):
     '''
     Returns a condition object
     '''
-    from multiprocessing.synchronize import Condition
+    from .synchronize import Condition
     return Condition(lock)

 def Semaphore(value=1):
     '''
     Returns a semaphore object
     '''
-    from multiprocessing.synchronize import Semaphore
+    from .synchronize import Semaphore
     return Semaphore(value)

 def BoundedSemaphore(value=1):
     '''
     Returns a bounded semaphore object
     '''
-    from multiprocessing.synchronize import BoundedSemaphore
+    from .synchronize import BoundedSemaphore
     return BoundedSemaphore(value)

 def Event():
     '''
     Returns an event object
     '''
-    from multiprocessing.synchronize import Event
+    from .synchronize import Event
     return Event()

 def Barrier(parties, action=None, timeout=None):
     '''
     Returns a barrier object
     '''
-    from multiprocessing.synchronize import Barrier
+    from .synchronize import Barrier
     return Barrier(parties, action, timeout)

 def Queue(maxsize=0):
     '''
     Returns a queue object
     '''
-    from multiprocessing.queues import Queue
+    from .queues import Queue
     return Queue(maxsize)

 def JoinableQueue(maxsize=0):
     '''
     Returns a queue object
     '''
-    from multiprocessing.queues import JoinableQueue
+    from .queues import JoinableQueue
     return JoinableQueue(maxsize)

 def SimpleQueue():
     '''
     Returns a queue object
     '''
-    from multiprocessing.queues import SimpleQueue
+    from .queues import SimpleQueue
     return SimpleQueue()

 def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
     '''
     Returns a process pool object
     '''
-    from multiprocessing.pool import Pool
+    from .pool import Pool
     return Pool(processes, initializer, initargs, maxtasksperchild)

 def RawValue(typecode_or_type, *args):
     '''
     Returns a shared object
     '''
-    from multiprocessing.sharedctypes import RawValue
+    from .sharedctypes import RawValue
     return RawValue(typecode_or_type, *args)

 def RawArray(typecode_or_type, size_or_initializer):
     '''
     Returns a shared array
     '''
-    from multiprocessing.sharedctypes import RawArray
+    from .sharedctypes import RawArray
     return RawArray(typecode_or_type, size_or_initializer)

 def Value(typecode_or_type, *args, lock=True):
     '''
     Returns a synchronized shared object
     '''
-    from multiprocessing.sharedctypes import Value
+    from .sharedctypes import Value
     return Value(typecode_or_type, *args, lock=lock)

 def Array(typecode_or_type, size_or_initializer, *, lock=True):
     '''
     Returns a synchronized shared array
     '''
-    from multiprocessing.sharedctypes import Array
+    from .sharedctypes import Array
     return Array(typecode_or_type, size_or_initializer, lock=lock)

 #
 #
 #

-if sys.platform == 'win32':
-
-    def set_executable(executable):
-        '''
-        Sets the path to a python.exe or pythonw.exe binary used to run
-        child processes on Windows instead of sys.executable.
-        Useful for people embedding Python.
-        '''
-        from multiprocessing.forking import set_executable
-        set_executable(executable)
-
-    __all__ += ['set_executable']
+def set_executable(executable):
+    '''
+    Sets the path to a python.exe or pythonw.exe binary used to run
+    child processes instead of sys.executable when using the 'spawn'
+    start method.  Useful for people embedding Python.
+    '''
+    from .spawn import set_executable
+    set_executable(executable)
+
+def set_start_method(method):
+    '''
+    Set method for starting processes: 'fork', 'spawn' or 'forkserver'.
+    '''
+    from .popen import set_start_method
+    set_start_method(method)
+
+def get_start_method():
+    '''
+    Get method for starting processes: 'fork', 'spawn' or 'forkserver'.
+    '''
+    from .popen import get_start_method
+    return get_start_method()
+
+def get_all_start_methods():
+    '''
+    Get list of availables start methods, default first.
+    '''
+    from .popen import get_all_start_methods
+    return get_all_start_methods()
+
+def set_forkserver_preload(module_names):
+    '''
+    Set list of module names to try to load in the forkserver process
+    when it is started.  Properly chosen this can significantly reduce
+    the cost of starting a new process using the forkserver method.
+    The default list is ['__main__'].
+    '''
+    try:
+        from .forkserver import set_forkserver_preload
    except ImportError:
        pass
    else:
        set_forkserver_preload(module_names)
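
A brief sketch of how the preload hook above is meant to be combined with the forkserver start method (the module names are illustrative):

import multiprocessing

if __name__ == '__main__':
    # Ask the fork server to import expensive modules once, up front,
    # so every child it forks starts with them already loaded.
    multiprocessing.set_forkserver_preload(['__main__', 'json'])
    multiprocessing.set_start_method('forkserver')
    p = multiprocessing.Process(target=print, args=('hello',))
    p.start()
    p.join()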
@@ -21,9 +21,13 @@ import tempfile
 import itertools

 import _multiprocessing
-from multiprocessing import current_process, AuthenticationError, BufferTooShort
-from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
-from multiprocessing.forking import ForkingPickler
+
+from . import reduction
+from . import util
+from . import AuthenticationError, BufferTooShort
+from .reduction import ForkingPickler
+
 try:
     import _winapi
     from _winapi import WAIT_OBJECT_0, WAIT_TIMEOUT, INFINITE
@@ -71,7 +75,7 @@ def arbitrary_address(family):
     if family == 'AF_INET':
         return ('localhost', 0)
     elif family == 'AF_UNIX':
-        return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
+        return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
     elif family == 'AF_PIPE':
         return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
                                (os.getpid(), next(_mmap_counter)))
@@ -505,7 +509,7 @@ if sys.platform != 'win32':
         c1 = Connection(s1.detach())
         c2 = Connection(s2.detach())
     else:
-        fd1, fd2 = os.pipe()
+        fd1, fd2 = util.pipe()
         c1 = Connection(fd1, writable=False)
         c2 = Connection(fd2, readable=False)
@@ -577,7 +581,7 @@ class SocketListener(object):
         self._last_accepted = None

         if family == 'AF_UNIX':
-            self._unlink = Finalize(
+            self._unlink = util.Finalize(
                 self, os.unlink, args=(address,), exitpriority=0
                 )
         else:
@@ -625,8 +629,8 @@ if sys.platform == 'win32':
             self._handle_queue = [self._new_handle(first=True)]
             self._last_accepted = None

-            sub_debug('listener created with address=%r', self._address)
-            self.close = Finalize(
+            util.sub_debug('listener created with address=%r', self._address)
+            self.close = util.Finalize(
                 self, PipeListener._finalize_pipe_listener,
                 args=(self._handle_queue, self._address), exitpriority=0
                 )
@@ -668,7 +672,7 @@ if sys.platform == 'win32':
         @staticmethod
         def _finalize_pipe_listener(queue, address):
-            sub_debug('closing listener with address=%r', address)
+            util.sub_debug('closing listener with address=%r', address)
             for handle in queue:
                 _winapi.CloseHandle(handle)
@@ -919,15 +923,32 @@ else:
 #

 if sys.platform == 'win32':
-    from . import reduction
-    ForkingPickler.register(socket.socket, reduction.reduce_socket)
-    ForkingPickler.register(Connection, reduction.reduce_connection)
-    ForkingPickler.register(PipeConnection, reduction.reduce_pipe_connection)
+    def reduce_connection(conn):
+        handle = conn.fileno()
+        with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
+            from . import resource_sharer
+            ds = resource_sharer.DupSocket(s)
+            return rebuild_connection, (ds, conn.readable, conn.writable)
+    def rebuild_connection(ds, readable, writable):
+        sock = ds.detach()
+        return Connection(sock.detach(), readable, writable)
+    reduction.register(Connection, reduce_connection)
+
+    def reduce_pipe_connection(conn):
+        access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
+                  (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
+        dh = reduction.DupHandle(conn.fileno(), access)
+        return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
+    def rebuild_pipe_connection(dh, readable, writable):
+        handle = dh.detach()
+        return PipeConnection(handle, readable, writable)
+    reduction.register(PipeConnection, reduce_pipe_connection)
+
 else:
-    try:
-        from . import reduction
-    except ImportError:
-        pass
-    else:
-        ForkingPickler.register(socket.socket, reduction.reduce_socket)
-        ForkingPickler.register(Connection, reduction.reduce_connection)
+    def reduce_connection(conn):
+        df = reduction.DupFd(conn.fileno())
+        return rebuild_connection, (df, conn.readable, conn.writable)
+    def rebuild_connection(df, readable, writable):
+        fd = df.detach()
+        return Connection(fd, readable, writable)
+    reduction.register(Connection, reduce_connection)
@@ -22,7 +22,7 @@ import sys
 import weakref
 import array

-from multiprocessing.dummy.connection import Pipe
+from .connection import Pipe
 from threading import Lock, RLock, Semaphore, BoundedSemaphore
 from threading import Event, Condition, Barrier
 from queue import Queue

@@ -113,7 +113,7 @@ def shutdown():
     pass

 def Pool(processes=None, initializer=None, initargs=()):
-    from multiprocessing.pool import ThreadPool
+    from ..pool import ThreadPool
     return ThreadPool(processes, initializer, initargs)

 JoinableQueue = Queue
import errno
import os
import select
import signal
import socket
import struct
import sys
import threading

from . import connection
from . import process
from . import reduction
from . import spawn
from . import util

__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
           'set_forkserver_preload']

#
#
#

MAXFDS_TO_SEND = 256
UNSIGNED_STRUCT = struct.Struct('Q')     # large enough for pid_t

_inherited_fds = None
_lock = threading.Lock()
_preload_modules = ['__main__']

#
# Public function
#

def set_forkserver_preload(modules_names):
    '''Set list of module names to try to load in forkserver process.'''
    global _preload_modules
    _preload_modules = modules_names

def get_inherited_fds():
    '''Return list of fds inherited from parent process.

    This returns None if the current process was not started by fork server.
    '''
    return _inherited_fds

def connect_to_new_process(fds):
    '''Request forkserver to create a child process.

    Returns a pair of fds (status_r, data_w).  The calling process can read
    the child process's pid and (eventually) its returncode from status_r.
    The calling process should write to data_w the pickled preparation and
    process data.
    '''
    if len(fds) + 3 >= MAXFDS_TO_SEND:
        raise ValueError('too many fds')
    address, alive_w = process.current_process()._config['forkserver_info']
    with socket.socket(socket.AF_UNIX) as client:
        client.connect(address)
        parent_r, child_w = util.pipe()
        child_r, parent_w = util.pipe()
        allfds = [child_r, child_w, alive_w]
        allfds += fds
        try:
            reduction.sendfds(client, allfds)
            return parent_r, parent_w
        except:
            os.close(parent_r)
            os.close(parent_w)
            raise
        finally:
            os.close(child_r)
            os.close(child_w)

def ensure_running():
    '''Make sure that a fork server is running.

    This can be called from any process.  Note that usually a child
    process will just reuse the forkserver started by its parent, so
    ensure_running() will do nothing.
    '''
    with _lock:
        config = process.current_process()._config
        if config.get('forkserver_info') is not None:
            return

        assert all(type(mod) is str for mod in _preload_modules)
        semaphore_tracker_fd = config['semaphore_tracker_fd']
        cmd = ('from multiprocessing.forkserver import main; ' +
               'main(%d, %d, %r, **%r)')

        if _preload_modules:
            desired_keys = {'main_path', 'sys_path'}
            data = spawn.get_preparation_data('ignore')
            data = dict((x, y) for (x, y) in data.items() if x in desired_keys)
        else:
            data = {}

        with socket.socket(socket.AF_UNIX) as listener:
            address = connection.arbitrary_address('AF_UNIX')
            listener.bind(address)
            os.chmod(address, 0o600)
            listener.listen(100)

            # all client processes own the write end of the "alive" pipe;
            # when they all terminate the read end becomes ready.
            alive_r, alive_w = os.pipe()
            config['forkserver_info'] = (address, alive_w)
            fds_to_pass = [listener.fileno(), alive_r, semaphore_tracker_fd]
            cmd %= (listener.fileno(), alive_r, _preload_modules, data)
            exe = spawn.get_executable()
            args = [exe] + util._args_from_interpreter_flags() + ['-c', cmd]
            pid = util.spawnv_passfds(exe, args, fds_to_pass)

def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    # close sys.stdin
    if sys.stdin is not None:
        try:
            sys.stdin.close()
            sys.stdin = open(os.devnull)
        except (OSError, ValueError):
            pass

    # ignoring SIGCHLD means no need to reap zombie processes
    handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener:
        readers = [listener, alive_r]

        while True:
            try:
                rfds, wfds, xfds = select.select(readers, [], [])

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b''
                    raise SystemExit

                assert listener in rfds
                with listener.accept()[0] as s:
                    code = 1
                    if os.fork() == 0:
                        try:
                            _serve_one(s, listener, alive_r, handler)
                        except Exception:
                            sys.excepthook(*sys.exc_info())
                            sys.stderr.flush()
                        finally:
                            os._exit(code)

            except InterruptedError:
                pass
            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise

#
# Code to bootstrap new process
#

def _serve_one(s, listener, alive_r, handler):
    global _inherited_fds

    # close unnecessary stuff and reset SIGCHLD handler
    listener.close()
    os.close(alive_r)
    signal.signal(signal.SIGCHLD, handler)

    # receive fds from parent process
    fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
    s.close()
    assert len(fds) <= MAXFDS_TO_SEND
    child_r, child_w, alive_w, *_inherited_fds = fds

    # send pid to client processes
    write_unsigned(child_w, os.getpid())

    # reseed random number generator
    if 'random' in sys.modules:
        import random
        random.seed()

    # run process object received over pipe
    code = spawn._main(child_r)

    # write the exit code to the pipe
    write_unsigned(child_w, code)

#
# Read and write unsigned numbers
#

def read_unsigned(fd):
    data = b''
    length = UNSIGNED_STRUCT.size
    while len(data) < length:
        while True:
            try:
                s = os.read(fd, length - len(data))
            except InterruptedError:
                pass
            else:
                break
        if not s:
            raise EOFError('unexpected EOF')
        data += s
    return UNSIGNED_STRUCT.unpack(data)[0]

def write_unsigned(fd, n):
    msg = UNSIGNED_STRUCT.pack(n)
    while msg:
        while True:
            try:
                nbytes = os.write(fd, msg)
            except InterruptedError:
                pass
            else:
                break
        if nbytes == 0:
            raise RuntimeError('should not get here')
        msg = msg[nbytes:]
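
The "alive" pipe used by ensure_running() and main() above is a classic liveness trick: every client process holds a duplicate of the write end, the server select()s on the read end, and a read that returns b'' can only mean that every write end has been closed, i.e. that all clients are gone. A self-contained sketch of just that mechanism, independent of the forkserver (Unix only, like this module):

import os

r, w = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(r)     # child keeps only its copy of the write end
    os._exit(0)     # exiting closes that copy
os.close(w)         # parent drops its own write end
os.waitpid(pid, 0)
assert os.read(r, 1) == b''     # EOF: no write ends remain anywhere
print('all holders of the pipe have terminated')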
@@ -8,15 +8,17 @@
 #

 import bisect
+import itertools
 import mmap
 import os
 import sys
+import tempfile
 import threading
-import itertools

 import _multiprocessing
-from multiprocessing.util import Finalize, info
-from multiprocessing.forking import assert_spawning
+
+from . import popen
+from . import reduction
+from . import util

 __all__ = ['BufferWrapper']

@@ -30,17 +32,25 @@ if sys.platform == 'win32':

     class Arena(object):

-        _counter = itertools.count()
+        _rand = tempfile._RandomNameSequence()

         def __init__(self, size):
             self.size = size
-            self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter))
-            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
-            assert _winapi.GetLastError() == 0, 'tagname already in use'
+            for i in range(100):
+                name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
+                buf = mmap.mmap(-1, size, tagname=name)
+                if _winapi.GetLastError() == 0:
+                    break
+                # We have reopened a preexisting mmap.
+                buf.close()
+            else:
+                raise FileExistsError('Cannot find name for new mmap')
+            self.name = name
+            self.buffer = buf
             self._state = (self.size, self.name)

         def __getstate__(self):
-            assert_spawning(self)
+            popen.assert_spawning(self)
             return self._state

         def __setstate__(self, state):

@@ -52,10 +62,28 @@ else:

     class Arena(object):

-        def __init__(self, size):
-            self.buffer = mmap.mmap(-1, size)
+        def __init__(self, size, fd=-1):
             self.size = size
-            self.name = None
+            self.fd = fd
+            if fd == -1:
+                self.fd, name = tempfile.mkstemp(
+                    prefix='pym-%d-'%os.getpid(), dir=util.get_temp_dir())
+                os.unlink(name)
+                util.Finalize(self, os.close, (self.fd,))
+                with open(self.fd, 'wb', closefd=False) as f:
+                    f.write(b'\0'*size)
+            self.buffer = mmap.mmap(self.fd, self.size)
+
+    def reduce_arena(a):
+        if a.fd == -1:
+            raise ValueError('Arena is unpicklable because '
+                             'forking was enabled when it was created')
+        return rebuild_arena, (a.size, reduction.DupFd(a.fd))
+
+    def rebuild_arena(size, dupfd):
+        return Arena(size, dupfd.detach())
+
+    reduction.register(Arena, reduce_arena)

 #
 # Class allowing allocation of chunks of memory from arenas
@@ -90,7 +118,7 @@ class Heap(object):
         if i == len(self._lengths):
             length = self._roundup(max(self._size, size), mmap.PAGESIZE)
             self._size *= 2
-            info('allocating a new mmap of length %d', length)
+            util.info('allocating a new mmap of length %d', length)
             arena = Arena(length)
             self._arenas.append(arena)
             return (arena, 0, length)
@@ -216,7 +244,7 @@ class BufferWrapper(object):
         assert 0 <= size < sys.maxsize
         block = BufferWrapper._heap.malloc(size)
         self._state = (block, size)
-        Finalize(self, BufferWrapper._heap.free, args=(block,))
+        util.Finalize(self, BufferWrapper._heap.free, args=(block,))

     def create_memoryview(self):
         (arena, start, stop), size = self._state
@@ -19,11 +19,15 @@ import threading
 import array
 import queue

-from traceback import format_exc
-from multiprocessing import Process, current_process, active_children, Pool, util, connection
-from multiprocessing.process import AuthenticationString
-from multiprocessing.forking import Popen, ForkingPickler
 from time import time as _time
+from traceback import format_exc
+
+from . import connection
+from . import pool
+from . import process
+from . import popen
+from . import reduction
+from . import util

 #
 # Register some things for pickling
@@ -31,16 +35,14 @@ from time import time as _time

 def reduce_array(a):
     return array.array, (a.typecode, a.tobytes())
-ForkingPickler.register(array.array, reduce_array)
+reduction.register(array.array, reduce_array)

 view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
 if view_types[0] is not list:       # only needed in Py3.0
     def rebuild_as_list(obj):
         return list, (list(obj),)
     for view_type in view_types:
-        ForkingPickler.register(view_type, rebuild_as_list)
-        import copyreg
-        copyreg.pickle(view_type, rebuild_as_list)
+        reduction.register(view_type, rebuild_as_list)

 #
 # Type for identifying shared objects
@@ -130,7 +132,7 @@ class Server(object):
     def __init__(self, registry, address, authkey, serializer):
         assert isinstance(authkey, bytes)
         self.registry = registry
-        self.authkey = AuthenticationString(authkey)
+        self.authkey = process.AuthenticationString(authkey)
         Listener, Client = listener_client[serializer]

         # do authentication later
@@ -146,7 +148,7 @@ class Server(object):
         Run the server forever
         '''
         self.stop_event = threading.Event()
-        current_process()._manager_server = self
+        process.current_process()._manager_server = self
        try:
             accepter = threading.Thread(target=self.accepter)
             accepter.daemon = True
@@ -438,9 +440,9 @@ class BaseManager(object):
     def __init__(self, address=None, authkey=None, serializer='pickle'):
         if authkey is None:
-            authkey = current_process().authkey
+            authkey = process.current_process().authkey
         self._address = address     # XXX not final address if eg ('', 0)
-        self._authkey = AuthenticationString(authkey)
+        self._authkey = process.AuthenticationString(authkey)
         self._state = State()
         self._state.value = State.INITIAL
         self._serializer = serializer
@@ -476,7 +478,7 @@ class BaseManager(object):
         reader, writer = connection.Pipe(duplex=False)

         # spawn process which runs a server
-        self._process = Process(
+        self._process = process.Process(
             target=type(self)._run_server,
             args=(self._registry, self._address, self._authkey,
                   self._serializer, writer, initializer, initargs),
@@ -691,11 +693,11 @@ class BaseProxy(object):
         self._Client = listener_client[serializer][1]

         if authkey is not None:
-            self._authkey = AuthenticationString(authkey)
+            self._authkey = process.AuthenticationString(authkey)
         elif self._manager is not None:
             self._authkey = self._manager._authkey
         else:
-            self._authkey = current_process().authkey
+            self._authkey = process.current_process().authkey

         if incref:
             self._incref()
@@ -704,7 +706,7 @@ class BaseProxy(object):
     def _connect(self):
         util.debug('making connection to manager')
-        name = current_process().name
+        name = process.current_process().name
         if threading.current_thread().name != 'MainThread':
             name += '|' + threading.current_thread().name
         conn = self._Client(self._token.address, authkey=self._authkey)
@@ -798,7 +800,7 @@ class BaseProxy(object):
     def __reduce__(self):
         kwds = {}
-        if Popen.thread_is_spawning():
+        if popen.get_spawning_popen() is not None:
             kwds['authkey'] = self._authkey

         if getattr(self, '_isauto', False):
@@ -835,14 +837,14 @@ def RebuildProxy(func, token, serializer, kwds):
     If possible the shared object is returned, or otherwise a proxy for it.
     '''
-    server = getattr(current_process(), '_manager_server', None)
+    server = getattr(process.current_process(), '_manager_server', None)

     if server and server.address == token.address:
         return server.id_to_obj[token.id][0]
     else:
         incref = (
             kwds.pop('incref', True) and
-            not getattr(current_process(), '_inheriting', False)
+            not getattr(process.current_process(), '_inheriting', False)
             )
         return func(token, serializer, incref=incref, **kwds)
@@ -889,7 +891,7 @@ def AutoProxy(token, serializer, manager=None, authkey=None,
     if authkey is None and manager is not None:
         authkey = manager._authkey
     if authkey is None:
-        authkey = current_process().authkey
+        authkey = process.current_process().authkey

     ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
     proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
@@ -1109,7 +1111,7 @@ SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                      AcquirerProxy)
 SyncManager.register('Condition', threading.Condition, ConditionProxy)
 SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
-SyncManager.register('Pool', Pool, PoolProxy)
+SyncManager.register('Pool', pool.Pool, PoolProxy)
 SyncManager.register('list', list, ListProxy)
 SyncManager.register('dict', dict, DictProxy)
 SyncManager.register('Value', Value, ValueProxy)
import sys
import threading
__all__ = ['Popen', 'get_spawning_popen', 'set_spawning_popen',
'assert_spawning']
#
# Check that the current thread is spawning a child process
#
_tls = threading.local()
def get_spawning_popen():
return getattr(_tls, 'spawning_popen', None)
def set_spawning_popen(popen):
_tls.spawning_popen = popen
def assert_spawning(obj):
if get_spawning_popen() is None:
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(obj).__name__
)
#
#
#
_Popen = None
def Popen(process_obj):
if _Popen is None:
set_start_method()
return _Popen(process_obj)
def get_start_method():
if _Popen is None:
set_start_method()
return _Popen.method
def set_start_method(meth=None, *, start_helpers=True):
global _Popen
try:
modname = _method_to_module[meth]
__import__(modname)
except (KeyError, ImportError):
raise ValueError('could not use start method %r' % meth)
module = sys.modules[modname]
if start_helpers:
module.Popen.ensure_helpers_running()
_Popen = module.Popen
if sys.platform == 'win32':
_method_to_module = {
None: 'multiprocessing.popen_spawn_win32',
'spawn': 'multiprocessing.popen_spawn_win32',
}
def get_all_start_methods():
return ['spawn']
else:
_method_to_module = {
None: 'multiprocessing.popen_fork',
'fork': 'multiprocessing.popen_fork',
'spawn': 'multiprocessing.popen_spawn_posix',
'forkserver': 'multiprocessing.popen_forkserver',
}
def get_all_start_methods():
from . import reduction
if reduction.HAVE_SEND_HANDLE:
return ['fork', 'spawn', 'forkserver']
else:
return ['fork', 'spawn']
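#
# Editor's usage sketch, not part of the commit: choosing a start method
# explicitly before the first child process is created.
#
#     from multiprocessing import popen
#     if 'forkserver' in popen.get_all_start_methods():
#         popen.set_start_method('forkserver')
#     else:
#         popen.set_start_method()       # fall back to the platform default
#     print(popen.get_start_method())
#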
import os
import sys
import signal
import errno
from . import util
__all__ = ['Popen']
#
# Start child process using fork
#
class Popen(object):
method = 'fork'
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self._launch(process_obj)
def duplicate_for_child(self, fd):
return fd
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except OSError as e:
if e.errno == errno.EINTR:
continue
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if self.returncode is None:
if timeout is not None:
from .connection import wait
if not wait([self.sentinel], timeout):
return None
# This shouldn't block if wait() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except ProcessLookupError:
pass
except OSError:
if self.wait(timeout=0.1) is None:
raise
def _launch(self, process_obj):
code = 1
parent_r, child_w = util.pipe()
self.pid = os.fork()
if self.pid == 0:
try:
os.close(parent_r)
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
finally:
os._exit(code)
else:
os.close(child_w)
util.Finalize(self, os.close, (parent_r,))
self.sentinel = parent_r
@staticmethod
def ensure_helpers_running():
pass
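#
# Editor's sketch, not part of the commit: how poll() above maps a raw
# waitpid() status to a returncode -- negative of the signal number if the
# child was killed by a signal, otherwise its exit status.
#
#     import os
#     pid = os.fork()
#     if pid == 0:
#         os._exit(7)
#     _, sts = os.waitpid(pid, 0)
#     code = -os.WTERMSIG(sts) if os.WIFSIGNALED(sts) else os.WEXITSTATUS(sts)
#     assert code == 7
#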
import io
import os
from . import reduction
if not reduction.HAVE_SEND_HANDLE:
raise ImportError('No support for sending fds between processes')
from . import forkserver
from . import popen
from . import popen_fork
from . import spawn
from . import util
__all__ = ['Popen']
#
# Wrapper for an fd used while launching a process
#
class _DupFd(object):
def __init__(self, ind):
self.ind = ind
def detach(self):
return forkserver.get_inherited_fds()[self.ind]
#
# Start child process using a server process
#
class Popen(popen_fork.Popen):
method = 'forkserver'
DupFd = _DupFd
def __init__(self, process_obj):
self._fds = []
super().__init__(process_obj)
def duplicate_for_child(self, fd):
self._fds.append(fd)
return len(self._fds) - 1
def _launch(self, process_obj):
prep_data = spawn.get_preparation_data(process_obj._name)
buf = io.BytesIO()
popen.set_spawning_popen(self)
try:
reduction.dump(prep_data, buf)
reduction.dump(process_obj, buf)
finally:
popen.set_spawning_popen(None)
self.sentinel, w = forkserver.connect_to_new_process(self._fds)
util.Finalize(self, os.close, (self.sentinel,))
with open(w, 'wb', closefd=True) as f:
f.write(buf.getbuffer())
self.pid = forkserver.read_unsigned(self.sentinel)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
from .connection import wait
timeout = 0 if flag == os.WNOHANG else None
if not wait([self.sentinel], timeout):
return None
try:
self.returncode = forkserver.read_unsigned(self.sentinel)
except (OSError, EOFError):
# The process ended abnormally perhaps because of a signal
self.returncode = 255
return self.returncode
@staticmethod
def ensure_helpers_running():
from . import semaphore_tracker
semaphore_tracker.ensure_running()
forkserver.ensure_running()
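#
# Editor's illustration, not part of the commit: with the forkserver method,
# duplicate_for_child() pickles only an *index* into self._fds; in the child,
# _DupFd.detach() uses that index to look the real descriptor up in
# forkserver.get_inherited_fds().  The same protocol in miniature:
#
#     fds = []                                # plays the role of Popen._fds
#     def duplicate_for_child(fd):
#         fds.append(fd)
#         return len(fds) - 1                 # only the index is pickled
#     ind = duplicate_for_child(42)
#     inherited = list(fds)                   # what the child receives
#     assert inherited[ind] == 42             # _DupFd(ind).detach()
#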
import fcntl
import io
import os
from . import popen
from . import popen_fork
from . import reduction
from . import spawn
from . import util
from . import current_process
__all__ = ['Popen']
#
# Wrapper for an fd used while launching a process
#
class _DupFd(object):
def __init__(self, fd):
self.fd = fd
def detach(self):
return self.fd
#
# Start child process using a fresh interpreter
#
class Popen(popen_fork.Popen):
method = 'spawn'
DupFd = _DupFd
def __init__(self, process_obj):
self._fds = []
super().__init__(process_obj)
def duplicate_for_child(self, fd):
self._fds.append(fd)
return fd
def _launch(self, process_obj):
tracker_fd = current_process()._config['semaphore_tracker_fd']
self._fds.append(tracker_fd)
prep_data = spawn.get_preparation_data(process_obj._name)
fp = io.BytesIO()
popen.set_spawning_popen(self)
try:
reduction.dump(prep_data, fp)
reduction.dump(process_obj, fp)
finally:
popen.set_spawning_popen(None)
parent_r = child_w = child_r = parent_w = None
try:
parent_r, child_w = util.pipe()
child_r, parent_w = util.pipe()
cmd = spawn.get_command_line() + [str(child_r)]
self._fds.extend([child_r, child_w])
self.pid = util.spawnv_passfds(spawn.get_executable(),
cmd, self._fds)
self.sentinel = parent_r
with open(parent_w, 'wb', closefd=False) as f:
f.write(fp.getbuffer())
finally:
if parent_r is not None:
util.Finalize(self, os.close, (parent_r,))
for fd in (child_r, child_w, parent_w):
if fd is not None:
os.close(fd)
@staticmethod
def ensure_helpers_running():
from . import semaphore_tracker
semaphore_tracker.ensure_running()
import os
import msvcrt
import signal
import sys
import _winapi
from . import spawn
from . import popen
from . import reduction
from . import util
__all__ = ['Popen']
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
method = 'spawn'
def __init__(self, process_obj):
prep_data = spawn.get_preparation_data(process_obj._name)
cmd = ' '.join('"%s"' % x for x in spawn.get_command_line())
# read end of pipe will be "stolen" by the child process
# -- see spawn_main() in spawn.py.
rhandle, whandle = _winapi.CreatePipe(None, 0)
wfd = msvcrt.open_osfhandle(whandle, 0)
cmd += ' {} {}'.format(os.getpid(), rhandle)
with open(wfd, 'wb', closefd=True) as to_child:
# start process
try:
hp, ht, pid, tid = _winapi.CreateProcess(
spawn.get_executable(), cmd,
None, None, False, 0, None, None, None)
_winapi.CloseHandle(ht)
except:
_winapi.CloseHandle(rhandle)
raise
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
self.sentinel = int(hp)
util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
# send information to child
popen.set_spawning_popen(self)
try:
reduction.dump(prep_data, to_child)
reduction.dump(process_obj, to_child)
finally:
popen.set_spawning_popen(None)
def duplicate_for_child(self, handle):
assert self is popen.get_spawning_popen()
return reduction.duplicate(handle, self.sentinel)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _winapi.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _winapi.WaitForSingleObject(int(self._handle), msecs)
if res == _winapi.WAIT_OBJECT_0:
code = _winapi.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_winapi.TerminateProcess(int(self._handle), TERMINATE)
except OSError:
if self.wait(timeout=1.0) is None:
raise
@staticmethod
def ensure_helpers_running():
pass
...@@ -43,7 +43,7 @@ def active_children(): ...@@ -43,7 +43,7 @@ def active_children():
Return list of process objects corresponding to live child processes Return list of process objects corresponding to live child processes
''' '''
_cleanup() _cleanup()
return list(_current_process._children) return list(_children)
# #
# #
...@@ -51,9 +51,9 @@ def active_children(): ...@@ -51,9 +51,9 @@ def active_children():
def _cleanup(): def _cleanup():
# check for processes which have finished # check for processes which have finished
for p in list(_current_process._children): for p in list(_children):
if p._popen.poll() is not None: if p._popen.poll() is not None:
_current_process._children.discard(p) _children.discard(p)
# #
# The `Process` class # The `Process` class
...@@ -63,21 +63,16 @@ class Process(object): ...@@ -63,21 +63,16 @@ class Process(object):
''' '''
Process objects represent activity that is run in a separate process Process objects represent activity that is run in a separate process
The class is analagous to `threading.Thread` The class is analogous to `threading.Thread`
''' '''
_Popen = None _Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
*, daemon=None): *, daemon=None):
assert group is None, 'group argument must be None for now' assert group is None, 'group argument must be None for now'
count = next(_current_process._counter) count = next(_process_counter)
self._identity = _current_process._identity + (count,) self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey self._config = _current_process._config.copy()
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid() self._parent_pid = os.getpid()
self._popen = None self._popen = None
self._target = target self._target = target
...@@ -85,6 +80,8 @@ class Process(object): ...@@ -85,6 +80,8 @@ class Process(object):
self._kwargs = dict(kwargs) self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \ self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity) ':'.join(str(i) for i in self._identity)
if daemon is not None:
self.daemon = daemon
_dangling.add(self) _dangling.add(self)
def run(self): def run(self):
...@@ -101,16 +98,16 @@ class Process(object): ...@@ -101,16 +98,16 @@ class Process(object):
assert self._popen is None, 'cannot start a process twice' assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \ assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process' 'can only start a process object created by current process'
assert not _current_process._daemonic, \ assert not _current_process._config.get('daemon'), \
'daemonic processes are not allowed to have children' 'daemonic processes are not allowed to have children'
_cleanup() _cleanup()
if self._Popen is not None: if self._Popen is not None:
Popen = self._Popen Popen = self._Popen
else: else:
from .forking import Popen from .popen import Popen
self._popen = Popen(self) self._popen = Popen(self)
self._sentinel = self._popen.sentinel self._sentinel = self._popen.sentinel
_current_process._children.add(self) _children.add(self)
def terminate(self): def terminate(self):
''' '''
...@@ -126,7 +123,7 @@ class Process(object): ...@@ -126,7 +123,7 @@ class Process(object):
assert self._popen is not None, 'can only join a started process' assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout) res = self._popen.wait(timeout)
if res is not None: if res is not None:
_current_process._children.discard(self) _children.discard(self)
def is_alive(self): def is_alive(self):
''' '''
...@@ -154,7 +151,7 @@ class Process(object): ...@@ -154,7 +151,7 @@ class Process(object):
''' '''
Return whether process is a daemon Return whether process is a daemon
''' '''
return self._daemonic return self._config.get('daemon', False)
@daemon.setter @daemon.setter
def daemon(self, daemonic): def daemon(self, daemonic):
...@@ -162,18 +159,18 @@ class Process(object): ...@@ -162,18 +159,18 @@ class Process(object):
Set whether process is a daemon Set whether process is a daemon
''' '''
assert self._popen is None, 'process has already started' assert self._popen is None, 'process has already started'
self._daemonic = daemonic self._config['daemon'] = daemonic
@property @property
def authkey(self): def authkey(self):
return self._authkey return self._config['authkey']
@authkey.setter @authkey.setter
def authkey(self, authkey): def authkey(self, authkey):
''' '''
Set authorization key of process Set authorization key of process
''' '''
self._authkey = AuthenticationString(authkey) self._config['authkey'] = AuthenticationString(authkey)
@property @property
def exitcode(self): def exitcode(self):
...@@ -227,17 +224,17 @@ class Process(object): ...@@ -227,17 +224,17 @@ class Process(object):
status = 'stopped[%s]' % _exitcode_to_name.get(status, status) status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '') status, self.daemon and ' daemon' or '')
## ##
def _bootstrap(self): def _bootstrap(self):
from . import util from . import util
global _current_process global _current_process, _process_counter, _children
try: try:
self._children = set() _process_counter = itertools.count(1)
self._counter = itertools.count(1) _children = set()
if sys.stdin is not None: if sys.stdin is not None:
try: try:
sys.stdin.close() sys.stdin.close()
...@@ -285,8 +282,8 @@ class Process(object): ...@@ -285,8 +282,8 @@ class Process(object):
class AuthenticationString(bytes): class AuthenticationString(bytes):
def __reduce__(self): def __reduce__(self):
from .forking import Popen from .popen import get_spawning_popen
if not Popen.thread_is_spawning(): if get_spawning_popen() is None:
raise TypeError( raise TypeError(
'Pickling an AuthenticationString object is ' 'Pickling an AuthenticationString object is '
'disallowed for security reasons' 'disallowed for security reasons'
...@@ -301,16 +298,19 @@ class _MainProcess(Process): ...@@ -301,16 +298,19 @@ class _MainProcess(Process):
def __init__(self): def __init__(self):
self._identity = () self._identity = ()
self._daemonic = False
self._name = 'MainProcess' self._name = 'MainProcess'
self._parent_pid = None self._parent_pid = None
self._popen = None self._popen = None
self._counter = itertools.count(1) self._config = {'authkey': AuthenticationString(os.urandom(32)),
self._children = set() 'semprefix': 'mp'}
self._authkey = AuthenticationString(os.urandom(32)) # Note that some versions of FreeBSD only allow named
self._tempdir = None # semaphores to have names of up to 14 characters. Therefore
# we choose a short prefix.
_current_process = _MainProcess() _current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess del _MainProcess
# #
......
...@@ -18,11 +18,15 @@ import weakref ...@@ -18,11 +18,15 @@ import weakref
import errno import errno
from queue import Empty, Full from queue import Empty, Full
import _multiprocessing import _multiprocessing
from multiprocessing.connection import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition from . import connection
from multiprocessing.util import debug, info, Finalize, register_after_fork from . import popen
from multiprocessing.forking import assert_spawning, ForkingPickler from . import synchronize
from .util import debug, info, Finalize, register_after_fork, is_exiting
from .reduction import ForkingPickler
# #
# Queue type using a pipe, buffer and thread # Queue type using a pipe, buffer and thread
...@@ -34,14 +38,14 @@ class Queue(object): ...@@ -34,14 +38,14 @@ class Queue(object):
if maxsize <= 0: if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False) self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = Lock() self._rlock = synchronize.Lock()
self._opid = os.getpid() self._opid = os.getpid()
if sys.platform == 'win32': if sys.platform == 'win32':
self._wlock = None self._wlock = None
else: else:
self._wlock = Lock() self._wlock = synchronize.Lock()
self._sem = BoundedSemaphore(maxsize) self._sem = synchronize.BoundedSemaphore(maxsize)
# For use by concurrent.futures # For use by concurrent.futures
self._ignore_epipe = False self._ignore_epipe = False
...@@ -51,7 +55,7 @@ class Queue(object): ...@@ -51,7 +55,7 @@ class Queue(object):
register_after_fork(self, Queue._after_fork) register_after_fork(self, Queue._after_fork)
def __getstate__(self): def __getstate__(self):
assert_spawning(self) popen.assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer, return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) self._rlock, self._wlock, self._sem, self._opid)
...@@ -208,8 +212,6 @@ class Queue(object): ...@@ -208,8 +212,6 @@ class Queue(object):
@staticmethod @staticmethod
def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe): def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
debug('starting thread to feed data to pipe') debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire nacquire = notempty.acquire
nrelease = notempty.release nrelease = notempty.release
nwait = notempty.wait nwait = notempty.wait
...@@ -279,8 +281,8 @@ class JoinableQueue(Queue): ...@@ -279,8 +281,8 @@ class JoinableQueue(Queue):
def __init__(self, maxsize=0): def __init__(self, maxsize=0):
Queue.__init__(self, maxsize) Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0) self._unfinished_tasks = synchronize.Semaphore(0)
self._cond = Condition() self._cond = synchronize.Condition()
def __getstate__(self): def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
...@@ -331,19 +333,19 @@ class JoinableQueue(Queue): ...@@ -331,19 +333,19 @@ class JoinableQueue(Queue):
class SimpleQueue(object): class SimpleQueue(object):
def __init__(self): def __init__(self):
self._reader, self._writer = Pipe(duplex=False) self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = Lock() self._rlock = synchronize.Lock()
self._poll = self._reader.poll self._poll = self._reader.poll
if sys.platform == 'win32': if sys.platform == 'win32':
self._wlock = None self._wlock = None
else: else:
self._wlock = Lock() self._wlock = synchronize.Lock()
def empty(self): def empty(self):
return not self._poll() return not self._poll()
def __getstate__(self): def __getstate__(self):
assert_spawning(self) popen.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock) return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state): def __setstate__(self, state):
......
#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
import os
import signal
import socket
import sys
import threading
from . import process
from . import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
return reduction.recv_handle(conn)
class _ResourceSharer(object):
'''Manager for resources using a background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
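#
# Editor's sketch, not part of the commit: the Unix round trip.  The
# pickling side wraps a descriptor in DupFd, which registers (send, close)
# callbacks with the sharer; the unpickling side calls detach() exactly once
# to collect a duplicate of the descriptor over the sharer's connection.
#
#     import os
#     r, w = os.pipe()
#     dup = DupFd(w)             # registers the duplicated fd with the sharer
#     # ... dup travels inside a pickle to another process ...
#     w2 = dup.detach()          # receiver obtains a fresh duplicate of w
#     os.write(w2, b'x')
#     assert os.read(r, 1) == b'x'
#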
#
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import errno
import os
import signal
import sys
import threading
import warnings
import _multiprocessing
from . import spawn
from . import util
from . import current_process
__all__ = ['ensure_running', 'register', 'unregister']
_lock = threading.Lock()
def ensure_running():
'''Make sure that the semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with _lock:
config = current_process()._config
if config.get('semaphore_tracker_fd') is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocessing.semaphore_tracker import main; main(%d)'
r, semaphore_tracker_fd = util.pipe()
try:
fds_to_pass.append(r)
# process will outlive us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(semaphore_tracker_fd)
raise
else:
config['semaphore_tracker_fd'] = semaphore_tracker_fd
finally:
os.close(r)
def register(name):
'''Register name of semaphore with semaphore tracker.'''
_send('REGISTER', name)
def unregister(name):
'''Unregister name of semaphore with semaphore tracker.'''
_send('UNREGISTER', name)
def _send(cmd, name):
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(msg) > 512:
# POSIX guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('msg too long')
fd = current_process()._config['semaphore_tracker_fd']
nbytes = os.write(fd, msg)
assert nbytes == len(msg)
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
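#
# Editor's illustration, not part of the commit: the tracker's wire protocol
# is one ASCII line per command, e.g.
#
#     REGISTER:/mp-abc123\n
#     UNREGISTER:/mp-abc123\n
#
# so a typical lifecycle is ('/mp-abc123' is a made-up example name):
#
#     ensure_running()            # start the tracker once per program
#     register('/mp-abc123')      # after sem_open() creates the name
#     unregister('/mp-abc123')    # after sem_unlink() removes it
#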
...@@ -10,8 +10,11 @@ ...@@ -10,8 +10,11 @@
import ctypes import ctypes
import weakref import weakref
from multiprocessing import heap, RLock from . import heap
from multiprocessing.forking import assert_spawning, ForkingPickler
from .synchronize import RLock
from .reduction import ForkingPickler
from .popen import assert_spawning
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
......
#
# Code used to start processes when using the spawn or forkserver
# start methods.
#
# multiprocessing/spawn.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import os
import pickle
import sys
from . import process
from . import util
from . import popen
__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
'get_preparation_data', 'get_command_line', 'import_main_path']
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
def get_executable():
return _python_exe
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
return True
else:
return False
def freeze_support():
'''
Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
main()
sys.exit()
def get_command_line():
'''
Returns prefix of command line used for spawning a child process
'''
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.spawn import spawn_main; spawn_main()'
opts = util._args_from_interpreter_flags()
return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
def spawn_main():
'''
Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
handle = int(sys.argv[-1])
if sys.platform == 'win32':
import msvcrt
from .reduction import steal_handle
pid = int(sys.argv[-2])
new_handle = steal_handle(pid, handle)
fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
else:
fd = handle
exitcode = _main(fd)
sys.exit(exitcode)
def _main(fd):
with os.fdopen(fd, 'rb', closefd=True) as from_parent:
process.current_process()._inheriting = True
try:
preparation_data = pickle.load(from_parent)
prepare(preparation_data)
self = pickle.load(from_parent)
finally:
del process.current_process()._inheriting
return self._bootstrap()
def _check_not_importing_main():
if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.''')
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
_check_not_importing_main()
d = dict(
log_to_stderr=util._log_to_stderr,
authkey=process.current_process().authkey,
)
if util._logger is not None:
d['log_level'] = util._logger.getEffectiveLevel()
sys_path = sys.path.copy()
try:
i = sys_path.index('')
except ValueError:
pass
else:
sys_path[i] = process.ORIGINAL_DIR
d.update(
name=name,
sys_path=sys_path,
sys_argv=sys.argv,
orig_dir=process.ORIGINAL_DIR,
dir=os.getcwd(),
start_method=popen.get_start_method(),
)
if sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)
return d
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process().authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'start_method' in data:
popen.set_start_method(data['start_method'], start_helpers=False)
if 'main_path' in data:
import_main_path(data['main_path'])
def import_main_path(main_path):
'''
Set sys.modules['__main__'] to module at main_path
'''
# XXX (ncoghlan): The following code makes several bogus
# assumptions regarding the relationship between __file__
# and a module's real name. See PEP 302 and issue #10845
if getattr(sys.modules['__main__'], '__file__', None) == main_path:
return
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import importlib
import types
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
sys.modules.pop('__mp_main__', None)
# We should not try to load __main__
# since that would execute 'if __name__ == "__main__"'
# clauses, potentially causing a pseudo fork bomb.
loader = importlib.find_loader(main_name, path=dirs)
main_module = types.ModuleType(main_name)
try:
loader.init_module_attrs(main_module)
except AttributeError: # init_module_attrs is optional
pass
main_module.__name__ = '__mp_main__'
code = loader.get_code(main_name)
exec(code, main_module.__dict__)
old_main_modules.append(sys.modules['__main__'])
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
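#
# Editor's usage sketch, not part of the commit: the main-module guard that
# _check_not_importing_main() above enforces for the spawn and forkserver
# start methods.
#
#     import multiprocessing
#
#     def work():
#         print('hello from the child')
#
#     if __name__ == '__main__':
#         multiprocessing.set_start_method('spawn')
#         p = multiprocessing.Process(target=work)
#         p.start()
#         p.join()
#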
...@@ -11,20 +11,24 @@ __all__ = [ ...@@ -11,20 +11,24 @@ __all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
] ]
import os
import threading import threading
import sys import sys
import itertools
import tempfile
import _multiprocessing import _multiprocessing
from multiprocessing.process import current_process
from multiprocessing.util import register_after_fork, debug
from multiprocessing.forking import assert_spawning, Popen
from time import time as _time from time import time as _time
from . import popen
from . import process
from . import util
# Try to import the mp.synchronize module cleanly, if it fails # Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation. # raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770 # See issue 3770
try: try:
from _multiprocessing import SemLock from _multiprocessing import SemLock, sem_unlink
except (ImportError): except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" + raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" + " implementation, therefore, the required" +
...@@ -44,15 +48,45 @@ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX ...@@ -44,15 +48,45 @@ SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
class SemLock(object): class SemLock(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue): def __init__(self, kind, value, maxvalue):
sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue) unlink_immediately = (sys.platform == 'win32' or
debug('created semlock with handle %s' % sl.handle) popen.get_start_method() == 'fork')
for i in range(100):
try:
sl = self._semlock = _multiprocessing.SemLock(
kind, value, maxvalue, self._make_name(),
unlink_immediately)
except FileExistsError:
pass
else:
break
else:
raise FileExistsError('cannot find name for semaphore')
util.debug('created semlock with handle %s' % sl.handle)
self._make_methods() self._make_methods()
if sys.platform != 'win32': if sys.platform != 'win32':
def _after_fork(obj): def _after_fork(obj):
obj._semlock._after_fork() obj._semlock._after_fork()
register_after_fork(self, _after_fork) util.register_after_fork(self, _after_fork)
if self._semlock.name is not None:
# We only get here if we are on Unix with forking
# disabled. When the object is garbage collected or the
# process shuts down we unlink the semaphore name
from .semaphore_tracker import register
register(self._semlock.name)
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
exitpriority=0)
@staticmethod
def _cleanup(name):
from .semaphore_tracker import unregister
sem_unlink(name)
unregister(name)
def _make_methods(self): def _make_methods(self):
self.acquire = self._semlock.acquire self.acquire = self._semlock.acquire
...@@ -65,15 +99,24 @@ class SemLock(object): ...@@ -65,15 +99,24 @@ class SemLock(object):
return self._semlock.__exit__(*args) return self._semlock.__exit__(*args)
def __getstate__(self): def __getstate__(self):
assert_spawning(self) popen.assert_spawning(self)
sl = self._semlock sl = self._semlock
return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue) if sys.platform == 'win32':
h = popen.get_spawning_popen().duplicate_for_child(sl.handle)
else:
h = sl.handle
return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state): def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state) self._semlock = _multiprocessing.SemLock._rebuild(*state)
debug('recreated blocker with handle %r' % state[0]) util.debug('recreated blocker with handle %r' % state[0])
self._make_methods() self._make_methods()
@staticmethod
def _make_name():
return '/%s-%s' % (process.current_process()._config['semprefix'],
next(SemLock._rand))
# #
# Semaphore # Semaphore
# #
...@@ -122,7 +165,7 @@ class Lock(SemLock): ...@@ -122,7 +165,7 @@ class Lock(SemLock):
def __repr__(self): def __repr__(self):
try: try:
if self._semlock._is_mine(): if self._semlock._is_mine():
name = current_process().name name = process.current_process().name
if threading.current_thread().name != 'MainThread': if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1: elif self._semlock._get_value() == 1:
...@@ -147,7 +190,7 @@ class RLock(SemLock): ...@@ -147,7 +190,7 @@ class RLock(SemLock):
def __repr__(self): def __repr__(self):
try: try:
if self._semlock._is_mine(): if self._semlock._is_mine():
name = current_process().name name = process.current_process().name
if threading.current_thread().name != 'MainThread': if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name name += '|' + threading.current_thread().name
count = self._semlock._count() count = self._semlock._count()
...@@ -175,7 +218,7 @@ class Condition(object): ...@@ -175,7 +218,7 @@ class Condition(object):
self._make_methods() self._make_methods()
def __getstate__(self): def __getstate__(self):
assert_spawning(self) popen.assert_spawning(self)
return (self._lock, self._sleeping_count, return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) self._woken_count, self._wait_semaphore)
...@@ -342,7 +385,7 @@ class Barrier(threading.Barrier): ...@@ -342,7 +385,7 @@ class Barrier(threading.Barrier):
def __init__(self, parties, action=None, timeout=None): def __init__(self, parties, action=None, timeout=None):
import struct import struct
from multiprocessing.heap import BufferWrapper from .heap import BufferWrapper
wrapper = BufferWrapper(struct.calcsize('i') * 2) wrapper = BufferWrapper(struct.calcsize('i') * 2)
cond = Condition() cond = Condition()
self.__setstate__((parties, action, timeout, cond, wrapper)) self.__setstate__((parties, action, timeout, cond, wrapper))
......
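#
# Editor's illustration, not part of the commit: SemLock._make_name() above
# builds names such as '/mp-7a3bzq' ('7a3bzq' stands for a random suffix from
# tempfile._RandomNameSequence); the 'mp' prefix comes from
# _config['semprefix'] and is kept short because of the FreeBSD 14-character
# limit noted in process.py.
#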
...@@ -17,13 +17,13 @@ import threading # we want threading to install its ...@@ -17,13 +17,13 @@ import threading # we want threading to install its
# cleanup function before multiprocessing does # cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children from . import process
__all__ = [ __all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING', 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
] ]
# #
...@@ -71,8 +71,6 @@ def get_logger(): ...@@ -71,8 +71,6 @@ def get_logger():
_logger = logging.getLogger(LOGGER_NAME) _logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0 _logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging # XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'): if hasattr(atexit, 'unregister'):
...@@ -111,13 +109,14 @@ def log_to_stderr(level=None): ...@@ -111,13 +109,14 @@ def log_to_stderr(level=None):
def get_temp_dir(): def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up # get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None: tempdir = process.current_process()._config.get('tempdir')
if tempdir is None:
import shutil, tempfile import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-') tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir) info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100) Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir process.current_process()._config['tempdir'] = tempdir
return current_process()._tempdir return tempdir
# #
# Support for reinitialization of objects when bootstrapping a child process # Support for reinitialization of objects when bootstrapping a child process
...@@ -273,8 +272,8 @@ def is_exiting(): ...@@ -273,8 +272,8 @@ def is_exiting():
_exiting = False _exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children, active_children=process.active_children,
current_process=current_process): current_process=process.current_process):
# We hold on to references to functions in the arglist due to the # We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this # situation described below, where this function is called after this
# module's globals are destroyed. # module's globals are destroyed.
...@@ -303,7 +302,7 @@ def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, ...@@ -303,7 +302,7 @@ def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
# #9207. # #9207.
for p in active_children(): for p in active_children():
if p._daemonic: if p.daemon:
info('calling terminate() for daemon %s', p.name) info('calling terminate() for daemon %s', p.name)
p._popen.terminate() p._popen.terminate()
...@@ -335,3 +334,54 @@ class ForkAwareLocal(threading.local): ...@@ -335,3 +334,54 @@ class ForkAwareLocal(threading.local):
register_after_fork(self, lambda obj : obj.__dict__.clear()) register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self): def __reduce__(self):
return type(self), () return type(self), ()
#
# Close fds except those specified
#
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
def close_all_fds_except(fds):
fds = list(fds) + [-1, MAXFD]
fds.sort()
assert fds[-1] == MAXFD, 'fd too large'
for i in range(len(fds) - 1):
os.closerange(fds[i]+1, fds[i+1])
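#
# Editor's sketch, not part of the commit: in a freshly forked child,
# close_all_fds_except([0, 1, 2, keep_fd]) leaves only the standard streams
# and keep_fd open ('keep_fd' is a made-up name for a descriptor the child
# still needs).
#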
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
import _posixsubprocess, fcntl
passfds = sorted(passfds)
tmp = []
# temporarily unset CLOEXEC on passed fds
for fd in passfds:
flag = fcntl.fcntl(fd, fcntl.F_GETFD)
if flag & fcntl.FD_CLOEXEC:
fcntl.fcntl(fd, fcntl.F_SETFD, flag & ~fcntl.FD_CLOEXEC)
tmp.append((fd, flag))
errpipe_read, errpipe_write = _posixsubprocess.cloexec_pipe()
try:
return _posixsubprocess.fork_exec(
args, [os.fsencode(path)], True, passfds, None, None,
-1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
False, False, None)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
# reset CLOEXEC where necessary
for fd, flag in tmp:
fcntl.fcntl(fd, fcntl.F_SETFD, flag)
#
# Return pipe with CLOEXEC set on fds
#
def pipe():
import _posixsubprocess
return _posixsubprocess.cloexec_pipe()
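#
# Editor's sketch, not part of the commit: spawning a helper that inherits
# only one pipe end, using the two helpers above.  Descriptor numbers are
# preserved across the exec, which is why str(w) can be passed as an
# argument.
#
#     import os, sys
#     r, w = pipe()                       # both ends have CLOEXEC set
#     args = [sys.executable, '-c',
#             'import os, sys; os.write(int(sys.argv[1]), b"ok")', str(w)]
#     pid = spawnv_passfds(sys.executable, args, [w])
#     os.close(w)
#     assert os.read(r, 2) == b'ok'
#     os.waitpid(pid, 0)
#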
...@@ -7,6 +7,11 @@ def foo(): ...@@ -7,6 +7,11 @@ def foo():
# correctly on Windows. However, we should get a RuntimeError rather # correctly on Windows. However, we should get a RuntimeError rather
# than the Windows equivalent of a fork bomb. # than the Windows equivalent of a fork bomb.
if len(sys.argv) > 1:
multiprocessing.set_start_method(sys.argv[1])
else:
multiprocessing.set_start_method('spawn')
p = multiprocessing.Process(target=foo) p = multiprocessing.Process(target=foo)
p.start() p.start()
p.join() p.join()
......
...@@ -149,7 +149,7 @@ try: ...@@ -149,7 +149,7 @@ try:
except ImportError: except ImportError:
threading = None threading = None
try: try:
import multiprocessing.process import _multiprocessing, multiprocessing.process
except ImportError: except ImportError:
multiprocessing = None multiprocessing = None
......
import unittest
import test._test_multiprocessing
test._test_multiprocessing.install_tests_in_module_dict(globals(), 'fork')
if __name__ == '__main__':
unittest.main()
import unittest
import test._test_multiprocessing
test._test_multiprocessing.install_tests_in_module_dict(globals(), 'forkserver')
if __name__ == '__main__':
unittest.main()
import unittest
import test._test_multiprocessing
test._test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn')
if __name__ == '__main__':
unittest.main()
...@@ -938,7 +938,9 @@ buildbottest: all platform ...@@ -938,7 +938,9 @@ buildbottest: all platform
QUICKTESTOPTS= $(TESTOPTS) -x test_subprocess test_io test_lib2to3 \ QUICKTESTOPTS= $(TESTOPTS) -x test_subprocess test_io test_lib2to3 \
test_multibytecodec test_urllib2_localnet test_itertools \ test_multibytecodec test_urllib2_localnet test_itertools \
test_multiprocessing test_mailbox test_socket test_poll \ test_multiprocessing_fork test_multiprocessing_spawn \
test_multiprocessing_forkserver \
test_mailbox test_socket test_poll \
test_select test_zipfile test_concurrent_futures test_select test_zipfile test_concurrent_futures
quicktest: all platform quicktest: all platform
$(TESTRUNNER) $(QUICKTESTOPTS) $(TESTRUNNER) $(QUICKTESTOPTS)
......
...@@ -126,6 +126,7 @@ static PyMethodDef module_methods[] = { ...@@ -126,6 +126,7 @@ static PyMethodDef module_methods[] = {
{"recv", multiprocessing_recv, METH_VARARGS, ""}, {"recv", multiprocessing_recv, METH_VARARGS, ""},
{"send", multiprocessing_send, METH_VARARGS, ""}, {"send", multiprocessing_send, METH_VARARGS, ""},
#endif #endif
{"sem_unlink", _PyMp_sem_unlink, METH_VARARGS, ""},
{NULL} {NULL}
}; };
......
...@@ -98,5 +98,6 @@ PyObject *_PyMp_SetError(PyObject *Type, int num); ...@@ -98,5 +98,6 @@ PyObject *_PyMp_SetError(PyObject *Type, int num);
*/ */
extern PyTypeObject _PyMp_SemLockType; extern PyTypeObject _PyMp_SemLockType;
extern PyObject *_PyMp_sem_unlink(PyObject *ignore, PyObject *args);
#endif /* MULTIPROCESSING_H */ #endif /* MULTIPROCESSING_H */