Commit 190d56e0 authored by Benjamin Peterson

add the multiprocessing package to fulfill PEP 371

parent d5299866
#
# Simple benchmarks for the multiprocessing package
#
import time, sys, multiprocessing, threading, Queue, gc
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
delta = 1
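# each benchmark below doubles `iterations` until one timed run lasts at
# least `delta` seconds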
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
a = '0' * 256
c.acquire()
c.notify()
c.release()
for i in xrange(iterations):
q.put(a)
q.put('STOP')
def test_queuespeed(Process, q, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = Process(target=queuespeed_func, args=(q, c, iterations))
c.acquire()
p.start()
c.wait()
c.release()
result = None
t = _timer()
while result != 'STOP':
result = q.get()
elapsed = _timer() - t
p.join()
print iterations, 'objects passed through the queue in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
a = '0' * 256
cond.acquire()
cond.notify()
cond.release()
for i in xrange(iterations):
c.send(a)
c.send('STOP')
def test_pipespeed():
c, d = multiprocessing.Pipe()
cond = multiprocessing.Condition()
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = multiprocessing.Process(target=pipe_func,
args=(d, cond, iterations))
cond.acquire()
p.start()
cond.wait()
cond.release()
result = None
t = _timer()
while result != 'STOP':
result = c.recv()
elapsed = _timer() - t
p.join()
print iterations, 'objects passed through connection in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_SEQSPEED
def test_seqspeed(seq):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in xrange(iterations):
a = seq[5]
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_LOCK
def test_lockspeed(l):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in xrange(iterations):
l.acquire()
l.release()
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
#### TEST_CONDITION
def conditionspeed_func(c, N):
c.acquire()
c.notify()
for i in xrange(N):
c.wait()
c.notify()
c.release()
def test_conditionspeed(Process, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
c.acquire()
p = Process(target=conditionspeed_func, args=(c, iterations))
p.start()
c.wait()
t = _timer()
for i in xrange(iterations):
c.notify()
c.wait()
elapsed = _timer()-t
c.release()
p.join()
print iterations * 2, 'waits in', elapsed, 'seconds'
print 'average number/sec:', iterations * 2 / elapsed
####
def test():
manager = multiprocessing.Manager()
gc.disable()
print '\n\t######## testing Queue.Queue\n'
test_queuespeed(threading.Thread, Queue.Queue(),
threading.Condition())
print '\n\t######## testing multiprocessing.Queue\n'
test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
multiprocessing.Condition())
print '\n\t######## testing Queue managed by server process\n'
test_queuespeed(multiprocessing.Process, manager.Queue(),
manager.Condition())
print '\n\t######## testing multiprocessing.Pipe\n'
test_pipespeed()
print
print '\n\t######## testing list\n'
test_seqspeed(range(10))
print '\n\t######## testing list managed by server process\n'
test_seqspeed(manager.list(range(10)))
print '\n\t######## testing Array("i", ..., lock=False)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
print '\n\t######## testing Array("i", ..., lock=True)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
print
print '\n\t######## testing threading.Lock\n'
test_lockspeed(threading.Lock())
print '\n\t######## testing threading.RLock\n'
test_lockspeed(threading.RLock())
print '\n\t######## testing multiprocessing.Lock\n'
test_lockspeed(multiprocessing.Lock())
print '\n\t######## testing multiprocessing.RLock\n'
test_lockspeed(multiprocessing.RLock())
print '\n\t######## testing lock managed by server process\n'
test_lockspeed(manager.Lock())
print '\n\t######## testing rlock managed by server process\n'
test_lockspeed(manager.RLock())
print
print '\n\t######## testing threading.Condition\n'
test_conditionspeed(threading.Thread, threading.Condition())
print '\n\t######## testing multiprocessing.Condition\n'
test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
print '\n\t######## testing condition managed by a server process\n'
test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
if __name__ == '__main__':
multiprocessing.freeze_support()
test()
#
# Module to allow spawning of processes on foreign host
#
# Depends on `multiprocessing` package -- tested with `processing-0.60`
#
__all__ = ['Cluster', 'Host', 'get_logger', 'current_process']
#
# Imports
#
import sys
import os
import tarfile
import shutil
import subprocess
import logging
import itertools
import Queue
try:
import cPickle as pickle
except ImportError:
import pickle
from multiprocessing import Process, current_process, cpu_count
from multiprocessing import util, managers, connection, forking, pool
#
# Logging
#
def get_logger():
return _logger
_logger = logging.getLogger('distributing')
_logger.propagate = 0
util.fix_up_logger(_logger)
_formatter = logging.Formatter(util.DEFAULT_LOGGING_FORMAT)
_handler = logging.StreamHandler()
_handler.setFormatter(_formatter)
_logger.addHandler(_handler)
info = _logger.info
debug = _logger.debug
#
# Get number of cpus
#
try:
slot_count = cpu_count()
except NotImplementedError:
slot_count = 1
#
# Manager type which spawns subprocesses
#
class HostManager(managers.SyncManager):
'''
Manager type used for spawning processes on a (presumably) foreign host
'''
def __init__(self, address, authkey):
managers.SyncManager.__init__(self, address, authkey)
self._name = 'Host-unknown'
def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
if hasattr(sys.modules['__main__'], '__file__'):
main_path = os.path.basename(sys.modules['__main__'].__file__)
else:
main_path = None
data = pickle.dumps((target, args, kwargs))
p = self._RemoteProcess(data, main_path)
if name is None:
temp = self._name.split('Host-')[-1] + '/Process-%s'
name = temp % ':'.join(map(str, p.get_identity()))
p.set_name(name)
return p
@classmethod
def from_address(cls, address, authkey):
manager = cls(address, authkey)
managers.transact(address, authkey, 'dummy')
manager._state.value = managers.State.STARTED
manager._name = 'Host-%s:%s' % manager.address
manager.shutdown = util.Finalize(
manager, HostManager._finalize_host,
args=(manager._address, manager._authkey, manager._name),
exitpriority=-10
)
return manager
@staticmethod
def _finalize_host(address, authkey, name):
managers.transact(address, authkey, 'shutdown')
def __repr__(self):
return '<Host(%s)>' % self._name
#
# Process subclass representing a process on (possibly) a remote machine
#
class RemoteProcess(Process):
'''
Represents a process started on a remote host
'''
def __init__(self, data, main_path):
assert not main_path or os.path.basename(main_path) == main_path
Process.__init__(self)
self._data = data
self._main_path = main_path
def _bootstrap(self):
forking.prepare({'main_path': self._main_path})
self._target, self._args, self._kwargs = pickle.loads(self._data)
return Process._bootstrap(self)
def get_identity(self):
return self._identity
HostManager.register('_RemoteProcess', RemoteProcess)
#
# A Pool class that uses a cluster
#
class DistributedPool(pool.Pool):
def __init__(self, cluster, processes=None, initializer=None, initargs=()):
self._cluster = cluster
self.Process = cluster.Process
pool.Pool.__init__(self, processes or len(cluster),
initializer, initargs)
def _setup_queues(self):
self._inqueue = self._cluster._SettableQueue()
self._outqueue = self._cluster._SettableQueue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
inqueue.set_contents([None] * size)
#
# Manager type which starts host managers on other machines
#
def LocalProcess(**kwds):
p = Process(**kwds)
p.set_name('localhost/' + p.get_name())
return p
class Cluster(managers.SyncManager):
'''
Represents collection of slots running on various hosts.
`Cluster` is a subclass of `SyncManager` so it allows creation of
various types of shared objects.
'''
def __init__(self, hostlist, modules):
managers.SyncManager.__init__(self, address=('localhost', 0))
self._hostlist = hostlist
self._modules = modules
if __name__ not in modules:
modules.append(__name__)
files = [sys.modules[name].__file__ for name in modules]
for i, file in enumerate(files):
if file.endswith('.pyc') or file.endswith('.pyo'):
files[i] = file[:-4] + '.py'
self._files = [os.path.abspath(file) for file in files]
def start(self):
managers.SyncManager.start(self)
l = connection.Listener(family='AF_INET', authkey=self._authkey)
for i, host in enumerate(self._hostlist):
host._start_manager(i, self._authkey, l.address, self._files)
for host in self._hostlist:
if host.hostname != 'localhost':
conn = l.accept()
i, address, cpus = conn.recv()
conn.close()
other_host = self._hostlist[i]
other_host.manager = HostManager.from_address(address,
self._authkey)
other_host.slots = other_host.slots or cpus
other_host.Process = other_host.manager.Process
else:
host.slots = host.slots or slot_count
host.Process = LocalProcess
self._slotlist = [
Slot(host) for host in self._hostlist for i in range(host.slots)
]
self._slot_iterator = itertools.cycle(self._slotlist)
self._base_shutdown = self.shutdown
del self.shutdown
def shutdown(self):
for host in self._hostlist:
if host.hostname != 'localhost':
host.manager.shutdown()
self._base_shutdown()
def Process(self, group=None, target=None, name=None, args=(), kwargs={}):
slot = self._slot_iterator.next()
return slot.Process(
group=group, target=target, name=name, args=args, kwargs=kwargs
)
def Pool(self, processes=None, initializer=None, initargs=()):
return DistributedPool(self, processes, initializer, initargs)
def __getitem__(self, i):
return self._slotlist[i]
def __len__(self):
return len(self._slotlist)
def __iter__(self):
return iter(self._slotlist)
#
# Queue subclass used by distributed pool
#
class SettableQueue(Queue.Queue):
def empty(self):
return not self.queue
def full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
def set_contents(self, contents):
# length of contents must be at least as large as the number of
# threads which have potentially called get()
self.not_empty.acquire()
try:
self.queue.clear()
self.queue.extend(contents)
self.not_empty.notifyAll()
finally:
self.not_empty.release()
Cluster.register('_SettableQueue', SettableQueue)
#
# Class representing a notional cpu in the cluster
#
class Slot(object):
def __init__(self, host):
self.host = host
self.Process = host.Process
#
# Host
#
class Host(object):
'''
Represents a host to use as a node in a cluster.
`hostname` gives the name of the host. If hostname is not
"localhost" then ssh is used to log in to the host. To log in as
a different user use a host name of the form
"username@somewhere.org"
`slots` is used to specify the number of slots for processes on
the host. This affects how often processes will be allocated to
this host. Normally this should be equal to the number of cpus on
that host.
'''
def __init__(self, hostname, slots=None):
self.hostname = hostname
self.slots = slots
def _start_manager(self, index, authkey, address, files):
if self.hostname != 'localhost':
tempdir = copy_to_remote_temporary_directory(self.hostname, files)
debug('startup files copied to %s:%s', self.hostname, tempdir)
p = subprocess.Popen(
['ssh', self.hostname, 'python', '-c',
'"import os; os.chdir(%r); '
'from distributing import main; main()"' % tempdir],
stdin=subprocess.PIPE
)
data = dict(
name='BootstrappingHost', index=index,
dist_log_level=_logger.getEffectiveLevel(),
dir=tempdir, authkey=str(authkey), parent_address=address
)
pickle.dump(data, p.stdin, pickle.HIGHEST_PROTOCOL)
p.stdin.close()
#
# Copy files to remote directory, returning name of directory
#
unzip_code = '''"
import tempfile, os, sys, tarfile
tempdir = tempfile.mkdtemp(prefix='distrib-')
os.chdir(tempdir)
tf = tarfile.open(fileobj=sys.stdin, mode='r|gz')
for ti in tf:
tf.extract(ti)
print tempdir
"'''
def copy_to_remote_temporary_directory(host, files):
p = subprocess.Popen(
['ssh', host, 'python', '-c', unzip_code],
stdout=subprocess.PIPE, stdin=subprocess.PIPE
)
tf = tarfile.open(fileobj=p.stdin, mode='w|gz')
for name in files:
tf.add(name, os.path.basename(name))
tf.close()
p.stdin.close()
return p.stdout.read().rstrip()
#
# Code which runs a host manager
#
def main():
# get data from parent over stdin
data = pickle.load(sys.stdin)
sys.stdin.close()
# set some stuff
_logger.setLevel(data['dist_log_level'])
forking.prepare(data)
# create server for a `HostManager` object
server = managers.Server(HostManager._registry, ('', 0), data['authkey'])
current_process()._server = server
# report server address and number of cpus back to parent
conn = connection.Client(data['parent_address'], authkey=data['authkey'])
conn.send((data['index'], server.address, slot_count))
conn.close()
# set name etc
current_process().set_name('Host-%s:%s' % server.address)
util._run_after_forkers()
# register a cleanup function
def cleanup(directory):
debug('removing directory %s', directory)
shutil.rmtree(directory)
debug('shutting down host manager')
util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)
# start host manager
debug('remote host manager starting in %s', data['dir'])
server.serve_forever()
#
# This module shows how to use arbitrary callables with a subclass of
# `BaseManager`.
#
from multiprocessing import freeze_support
from multiprocessing.managers import BaseManager, BaseProxy
import operator
##
class Foo(object):
def f(self):
print 'you called Foo.f()'
def g(self):
print 'you called Foo.g()'
def _h(self):
print 'you called Foo._h()'
# A simple generator function
def baz():
for i in xrange(10):
yield i*i
# Proxy type for generator objects
class GeneratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
# Function to return the operator module
def get_operator_module():
return operator
##
class MyManager(BaseManager):
pass
# register the Foo class; make `f()` and `g()` accessible via proxy
MyManager.register('Foo1', Foo)
# register the Foo class; make `g()` and `_h()` accessible via proxy
MyManager.register('Foo2', Foo, exposed=('g', '_h'))
# register the generator function baz; use `GeneratorProxy` to make proxies
MyManager.register('baz', baz, proxytype=GeneratorProxy)
# register get_operator_module(); make public functions accessible via proxy
MyManager.register('operator', get_operator_module)
##
def test():
manager = MyManager()
manager.start()
print '-' * 20
f1 = manager.Foo1()
f1.f()
f1.g()
assert not hasattr(f1, '_h')
assert sorted(f1._exposed_) == sorted(['f', 'g'])
print '-' * 20
f2 = manager.Foo2()
f2.g()
f2._h()
assert not hasattr(f2, 'f')
assert sorted(f2._exposed_) == sorted(['g', '_h'])
print '-' * 20
it = manager.baz()
for i in it:
print '<%d>' % i,
print
print '-' * 20
op = manager.operator()
print 'op.add(23, 45) =', op.add(23, 45)
print 'op.pow(2, 94) =', op.pow(2, 94)
print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6)
print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3)
print 'op._exposed_ =', op._exposed_
##
if __name__ == '__main__':
freeze_support()
test()
#
# A test of `multiprocessing.Pool` class
#
import multiprocessing
import time
import random
import sys
#
# Functions used by test code
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % (
multiprocessing.current_process().get_name(),
func.__name__, args, result
)
def calculatestar(args):
return calculate(*args)
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
def f(x):
return 1.0 / (x-5.0)
def pow3(x):
return x**3
def noop(x):
pass
#
# Test code
#
def test():
print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
#
# Create pool
#
PROCESSES = 4
print 'Creating pool with %d processes\n' % PROCESSES
pool = multiprocessing.Pool(PROCESSES)
print 'pool = %s' % pool
print
#
# Tests
#
TASKS = [(mul, (i, 7)) for i in range(10)] + \
[(plus, (i, 8)) for i in range(10)]
results = [pool.apply_async(calculate, t) for t in TASKS]
imap_it = pool.imap(calculatestar, TASKS)
imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
print 'Ordered results using pool.apply_async():'
for r in results:
print '\t', r.get()
print
print 'Ordered results using pool.imap():'
for x in imap_it:
print '\t', x
print
print 'Unordered results using pool.imap_unordered():'
for x in imap_unordered_it:
print '\t', x
print
print 'Ordered results using pool.map() --- will block till complete:'
for x in pool.map(calculatestar, TASKS):
print '\t', x
print
#
# Simple benchmarks
#
N = 100000
print 'def pow3(x): return x**3'
t = time.time()
A = map(pow3, xrange(N))
print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t)
t = time.time()
B = pool.map(pow3, xrange(N))
print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t)
t = time.time()
C = list(pool.imap(pow3, xrange(N), chunksize=N//8))
print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
' seconds' % (N, N//8, time.time() - t)
assert A == B == C, (len(A), len(B), len(C))
print
L = [None] * 1000000
print 'def noop(x): pass'
print 'L = [None] * 1000000'
t = time.time()
A = map(noop, L)
print '\tmap(noop, L):\n\t\t%s seconds' % \
(time.time() - t)
t = time.time()
B = pool.map(noop, L)
print '\tpool.map(noop, L):\n\t\t%s seconds' % \
(time.time() - t)
t = time.time()
C = list(pool.imap(noop, L, chunksize=len(L)//8))
print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
(len(L)//8, time.time() - t)
assert A == B == C, (len(A), len(B), len(C))
print
del A, B, C, L
#
# Test error handling
#
print 'Testing error handling:'
try:
print pool.apply(f, (5,))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from pool.apply()'
else:
raise AssertionError, 'expected ZeroDivisionError'
try:
print pool.map(f, range(10))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from pool.map()'
else:
raise AssertionError, 'expected ZeroDivisionError'
try:
print list(pool.imap(f, range(10)))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from list(pool.imap())'
else:
raise AssertionError, 'expected ZeroDivisionError'
it = pool.imap(f, range(10))
for i in range(10):
try:
x = it.next()
except ZeroDivisionError:
if i == 5:
pass
except StopIteration:
break
else:
if i == 5:
raise AssertionError, 'expected ZeroDivisionError'
assert i == 9
print '\tGot ZeroDivisionError as expected from IMapIterator.next()'
print
#
# Testing timeouts
#
print 'Testing ApplyResult.get() with timeout:',
res = pool.apply_async(calculate, TASKS[0])
while 1:
sys.stdout.flush()
try:
sys.stdout.write('\n\t%s' % res.get(0.02))
break
except multiprocessing.TimeoutError:
sys.stdout.write('.')
print
print
print 'Testing IMapIterator.next() with timeout:',
it = pool.imap(calculatestar, TASKS)
while 1:
sys.stdout.flush()
try:
sys.stdout.write('\n\t%s' % it.next(0.02))
except StopIteration:
break
except multiprocessing.TimeoutError:
sys.stdout.write('.')
print
print
#
# Testing callback
#
print 'Testing callback:'
A = []
B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
r = pool.apply_async(mul, (7, 8), callback=A.append)
r.wait()
r = pool.map_async(pow3, range(10), callback=A.extend)
r.wait()
if A == B:
print '\tcallbacks succeeded\n'
else:
print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)
#
# Check there are no outstanding tasks
#
assert not pool._cache, 'cache = %r' % pool._cache
#
# Check close() methods
#
print 'Testing close():'
for worker in pool._pool:
assert worker.is_alive()
result = pool.apply_async(time.sleep, [0.5])
pool.close()
pool.join()
assert result.get() is None
for worker in pool._pool:
assert not worker.is_alive()
print '\tclose() succeeded\n'
#
# Check terminate() method
#
print 'Testing terminate():'
pool = multiprocessing.Pool(2)
DELTA = 0.1
ignore = pool.apply(pow3, [2])
results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
pool.terminate()
pool.join()
for worker in pool._pool:
assert not worker.is_alive()
print '\tterminate() succeeded\n'
#
# Check garbage collection
#
print 'Testing garbage collection:'
pool = multiprocessing.Pool(2)
DELTA = 0.1
processes = pool._pool
ignore = pool.apply(pow3, [2])
results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
results = pool = None
time.sleep(DELTA * 2)
for worker in processes:
assert not worker.is_alive()
print '\tgarbage collection succeeded\n'
if __name__ == '__main__':
multiprocessing.freeze_support()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
import multiprocessing.dummy as multiprocessing
else:
print 'Usage:\n\t%s [processes | threads]' % sys.argv[0]
raise SystemExit(2)
test()
#
# A test file for the `multiprocessing` package
#
import time, sys, random
from Queue import Empty
import multiprocessing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
random.seed()
time.sleep(random.random()*4)
mutex.acquire()
print '\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished'
running.value -= 1
mutex.release()
def test_value():
TASKS = 10
running = multiprocessing.Value('i', TASKS)
mutex = multiprocessing.Lock()
for i in range(TASKS):
p = multiprocessing.Process(target=value_func, args=(running, mutex))
p.start()
while running.value > 0:
time.sleep(0.08)
mutex.acquire()
print running.value,
sys.stdout.flush()
mutex.release()
print
print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
for i in range(30):
time.sleep(0.5 * random.random())
queue.put(i*i)
queue.put('STOP')
def test_queue():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=queue_func, args=(q,))
p.start()
o = None
while o != 'STOP':
try:
o = q.get(timeout=0.3)
print o,
sys.stdout.flush()
except Empty:
print 'TIMEOUT'
print
#### TEST_CONDITION
def condition_func(cond):
cond.acquire()
print '\t' + str(cond)
time.sleep(2)
print '\tchild is notifying'
print '\t' + str(cond)
cond.notify()
cond.release()
def test_condition():
cond = multiprocessing.Condition()
p = multiprocessing.Process(target=condition_func, args=(cond,))
print cond
cond.acquire()
print cond
cond.acquire()
print cond
p.start()
print 'main is waiting'
cond.wait()
print 'main has woken up'
print cond
cond.release()
print cond
cond.release()
p.join()
print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
sema.acquire()
mutex.acquire()
running.value += 1
print running.value, 'tasks are running'
mutex.release()
random.seed()
time.sleep(random.random()*2)
mutex.acquire()
running.value -= 1
print '%s has finished' % multiprocessing.current_process()
mutex.release()
sema.release()
def test_semaphore():
sema = multiprocessing.Semaphore(3)
mutex = multiprocessing.RLock()
running = multiprocessing.Value('i', 0)
processes = [
multiprocessing.Process(target=semaphore_func,
args=(sema, mutex, running))
for i in range(10)
]
for p in processes:
p.start()
for p in processes:
p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
print '\tchild sleeping'
time.sleep(5.5)
print '\n\tchild terminating'
def test_join_timeout():
p = multiprocessing.Process(target=join_timeout_func)
p.start()
print 'waiting for process to finish'
while 1:
p.join(timeout=1)
if not p.is_alive():
break
print '.',
sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
print '\t%r is waiting' % multiprocessing.current_process()
event.wait()
print '\t%r has woken up' % multiprocessing.current_process()
def test_event():
event = multiprocessing.Event()
processes = [multiprocessing.Process(target=event_func, args=(event,))
for i in range(5)]
for p in processes:
p.start()
print 'main is sleeping'
time.sleep(2)
print 'main is setting event'
event.set()
for p in processes:
p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
for i in range(len(values)):
v = values[i][1]
sv = shared_values[i].value
assert v == sv
for i in range(len(values)):
a = arrays[i][1]
sa = list(shared_arrays[i][:])
assert a == sa
print 'Tests passed'
def test_sharedvalues():
values = [
('i', 10),
('h', -2),
('d', 1.25)
]
arrays = [
('i', range(100)),
('d', [0.25 * i for i in range(100)]),
('H', range(1000))
]
shared_values = [multiprocessing.Value(id, v) for id, v in values]
shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays]
p = multiprocessing.Process(
target=sharedvalues_func,
args=(values, arrays, shared_values, shared_arrays)
)
p.start()
p.join()
assert p.get_exitcode() == 0
####
def test(namespace=multiprocessing):
global multiprocessing
multiprocessing = namespace
for func in [ test_value, test_queue, test_condition,
test_semaphore, test_join_timeout, test_event,
test_sharedvalues ]:
print '\n\t######## %s\n' % func.__name__
func()
ignore = multiprocessing.active_children() # cleanup any old processes
if hasattr(multiprocessing, '_debug_info'):
info = multiprocessing._debug_info()
if info:
print info
raise ValueError, 'there should be no positive refcounts left'
if __name__ == '__main__':
multiprocessing.freeze_support()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
namespace = multiprocessing
elif sys.argv[1] == 'manager':
print ' Using processes and a manager '.center(79, '-')
namespace = multiprocessing.Manager()
namespace.Process = multiprocessing.Process
namespace.current_process = multiprocessing.current_process
namespace.active_children = multiprocessing.active_children
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
import multiprocessing.dummy as namespace
else:
print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
raise SystemExit, 2
test(namespace)
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
import os
import sys
from multiprocessing import Process, current_process, freeze_support
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
if sys.platform == 'win32':
import multiprocessing.reduction # make sockets picklable/inheritable
def note(format, *args):
sys.stderr.write('[%s]\t%s\n' % (current_process().get_name(), format % args))
class RequestHandler(SimpleHTTPRequestHandler):
# we override log_message() to show which process is handling the request
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, RequestHandler)
# create child processes to act as workers
for i in range(number_of_processes-1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
def test():
DIR = os.path.join(os.path.dirname(__file__), '..')
ADDRESS = ('localhost', 8000)
NUMBER_OF_PROCESSES = 4
print 'Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']
os.chdir(DIR)
runpool(ADDRESS, NUMBER_OF_PROCESSES)
if __name__ == '__main__':
freeze_support()
test()
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().get_name(), func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print 'Unordered results:'
for i in range(len(TASKS1)):
print '\t', done_queue.get()
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
print '\t', done_queue.get()
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
:mod:`multiprocessing` --- Process-based "threading" interface
==============================================================
.. module:: multiprocessing
:synopsis: Process-based "threading" interface.
.. versionadded:: 2.6
:mod:`multiprocessing` is a package for the Python language which supports the
spawning of processes using an API similar to that of the :mod:`threading`
module. It runs on both Unix and Windows.
The :mod:`multiprocessing` module offers both local and remote concurrency,
effectively side-stepping the Global Interpreter Lock by using subprocesses
instead of threads. Because of this, the :mod:`multiprocessing` module allows
the programmer to fully leverage multiple processors on a given machine.
Introduction
------------
Threads, processes and the GIL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To run more than one piece of code at the same time on the same computer one
can use either multiple processes or multiple threads.
Although a program can be made up of multiple processes, these processes are in
effect completely independent of one another: different processes are not able
to cooperate with one another unless one sets up some means of communication
between them (such as by using sockets). If a lot of data must be transferred
between processes then this can be inefficient.
On the other hand, multiple threads within a single process are intimately
connected: they share their data but often can interfere badly with one another.
It is often argued that the only way to make multithreaded programming "easy" is
to avoid relying on any shared state and for the threads to only communicate by
passing messages to each other.
CPython has a *Global Interpreter Lock* (GIL) which in many ways makes threading
easier than it is in most languages by making sure that only one thread can
manipulate the interpreter's objects at a time. As a result, it is often safe
to let multiple threads access data without using any additional locking as one
would need to in a language such as C.
One downside of the GIL is that on multi-processor (or multi-core) systems a
multithreaded Python program can only make use of one processor at a time,
unless the application makes heavy use of I/O, which effectively side-steps
the problem. This limitation can be overcome by using multiple processes
instead.
This package allows one to write multi-process programs using much the same API
that one uses for writing threaded programs.
Forking and spawning
~~~~~~~~~~~~~~~~~~~~
There are two ways of creating a new process in Python:
* The current process can *fork* a new child process by using the
:func:`os.fork` function. This effectively creates an identical copy of the
current process which is now able to go off and perform some task set by the
parent process. This means that the child process inherits *copies* of all
variables that the parent process had. However, :func:`os.fork` is not
available on every platform: in particular Windows does not support it.
* Alternatively, the current process can spawn a completely new Python
interpreter by using the :mod:`subprocess` module or one of the
:func:`os.spawn*` functions. Getting this new interpreter into a fit state
to perform the task set for it by its parent process is, however, a bit of a
challenge.
The :mod:`multiprocessing` module uses :func:`os.fork` if it is available since
it makes life a lot simpler. Forking the process is also more efficient in
terms of memory usage and the time needed to create the new process.
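For illustration only, here is a minimal sketch of the raw :func:`os.fork`
approach on Unix (this is what :mod:`multiprocessing` builds on, not part of
its API)::

   import os

   pid = os.fork()            # clone the current process (Unix only)
   if pid == 0:
       # child: runs with copies of the parent's variables
       print 'hello from the child'
       os._exit(0)            # exit the child without running cleanup handlers
   else:
       os.waitpid(pid, 0)     # parent: wait for the child to finish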
The :class:`Process` class
~~~~~~~~~~~~~~~~~~~~~~~~~~
In :mod:`multiprocessing`, processes are spawned by creating a :class:`Process`
object and then calling its :meth:`Process.start` method. :class:`Process`
follows the API of :class:`threading.Thread`. A trivial example of a
multiprocess program is ::
from multiprocessing import Process
def f(name):
print 'hello', name
if __name__ == '__main__':
p = Process(target=f, args=('bob',))
p.start()
p.join()
Here the function ``f`` is run in a child process.
For an explanation of why (on Windows) the ``if __name__ == '__main__'`` part is
necessary, see :ref:`multiprocessing-programming`.
Exchanging objects between processes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:mod:`multiprocessing` supports two types of communication channel between
processes:
**Queues**
The :class:`Queue` class is a near clone of :class:`Queue.Queue`. For
example::
from multiprocessing import Process, Queue
def f(q):
q.put([42, None, 'hello'])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
print q.get() # prints "[42, None, 'hello']"
p.join()
Queues are thread and process safe.
**Pipes**
The :func:`Pipe` function returns a pair of connection objects connected by a
pipe which by default is duplex (two-way). For example::
from multiprocessing import Process, Pipe
def f(conn):
conn.send([42, None, 'hello'])
conn.close()
if __name__ == '__main__':
parent_conn, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
p.start()
print parent_conn.recv() # prints "[42, None, 'hello']"
p.join()
The two connection objects returned by :func:`Pipe` represent the two ends of
the pipe. Each connection object has :meth:`send` and :meth:`recv` methods
(among others). Note that data in a pipe may become corrupted if two
processes (or threads) try to read from or write to the *same* end of the
pipe at the same time. Of course there is no risk of corruption from
processes using different ends of the pipe at the same time.
Synchronization between processes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:mod:`multiprocessing` contains equivalents of all the synchronization
primitives from :mod:`threading`. For instance one can use a lock to ensure
that only one process prints to standard output at a time::
from multiprocessing import Process, Lock
def f(l, i):
l.acquire()
print 'hello world', i
l.release()
if __name__ == '__main__':
lock = Lock()
for num in range(10):
Process(target=f, args=(lock, num)).start()
Without using the lock, output from the different processes is liable to get all
mixed up.
Sharing state between processes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As mentioned above, when doing concurrent programming it is usually best to
avoid using shared state as far as possible. This is particularly true when
using multiple processes.
However, if you really do need to use some shared data then
:mod:`multiprocessing` provides a couple of ways of doing so.
**Shared memory**
Data can be stored in a shared memory map using :class:`Value` or
:class:`Array`. For example, the following code ::
from multiprocessing import Process, Value, Array
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):
a[i] = -a[i]
if __name__ == '__main__':
num = Value('d', 0.0)
arr = Array('i', range(10))
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print num.value
print arr[:]
will print ::
3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
The ``'d'`` and ``'i'`` arguments used when creating ``num`` and ``arr`` are
typecodes of the kind used by the :mod:`array` module: ``'d'`` indicates a
double precision float and ``'i'`` indicates a signed integer. These shared
objects will be process and thread safe.
For more flexibility in using shared memory one can use the
:mod:`multiprocessing.sharedctypes` module which supports the creation of
arbitrary ctypes objects allocated from shared memory.
**Server process**
A manager object returned by :func:`Manager` controls a server process which
holds Python objects and allows other processes to manipulate them using
proxies.
A manager returned by :func:`Manager` will support types :class:`list`,
:class:`dict`, :class:`Namespace`, :class:`Lock`, :class:`RLock`,
:class:`Semaphore`, :class:`BoundedSemaphore`, :class:`Condition`,
:class:`Event`, :class:`Queue`, :class:`Value` and :class:`Array`. For
example, ::
from multiprocessing import Process, Manager
def f(d, l):
d[1] = '1'
d['2'] = 2
d[0.25] = None
l.reverse()
if __name__ == '__main__':
manager = Manager()
d = manager.dict()
l = manager.list(range(10))
p = Process(target=f, args=(d, l))
p.start()
p.join()
print d
print l
will print ::
{0.25: None, 1: '1', '2': 2}
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
Server process managers are more flexible than using shared memory objects
because they can be made to support arbitrary object types. Also, a single
manager can be shared by processes on different computers over a network.
They are, however, slower than using shared memory.
Using a pool of workers
~~~~~~~~~~~~~~~~~~~~~~~
The :class:`multiprocessing.pool.Pool` class represents a pool of worker
processes. It has methods which allow tasks to be offloaded to the worker
processes in a few different ways.
For example::
from multiprocessing import Pool
def f(x):
return x*x
if __name__ == '__main__':
pool = Pool(processes=4) # start 4 worker processes
result = pool.apply_async(f, [10]) # evaluate "f(10)" asynchronously
print result.get(timeout=1) # prints "100" unless your computer is *very* slow
print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]"
Reference
---------
The :mod:`multiprocessing` package mostly replicates the API of the
:mod:`threading` module.
:class:`Process` and exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. class:: Process([group[, target[, name[, args[, kwargs]]]]])
Process objects represent activity that is run in a separate process. The
:class:`Process` class has equivalents of all the methods of
:class:`threading.Thread`.
The constructor should always be called with keyword arguments. *group*
should always be ``None``; it exists solely for compatibility with
:class:`threading.Thread`. *target* is the callable object to be invoked by
the :meth:`run()` method. It defaults to ``None``, meaning nothing is
called. *name* is the process name. By default, a unique name is constructed
of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\
:sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length
is determined by the *generation* of the process. *args* is the argument
tuple for the target invocation. *kwargs* is a dictionary of keyword
arguments for the target invocation. By default, no arguments are passed to
*target*.
If a subclass overrides the constructor, it must make sure it invokes the
base class constructor (:meth:`Process.__init__`) before doing anything else
to the process.
.. method:: run()
Method representing the process's activity.
You may override this method in a subclass. The standard :meth:`run`
method invokes the callable object passed to the object's constructor as
the target argument, if any, with sequential and keyword arguments taken
from the *args* and *kwargs* arguments, respectively.
.. method:: start()
Start the process's activity.
This must be called at most once per process object. It arranges for the
object's :meth:`run` method to be invoked in a separate process.
.. method:: join([timeout])
Block the calling thread until the process whose :meth:`join` method is
called terminates or until the optional timeout occurs.
If *timeout* is ``None`` then there is no timeout.
A process can be joined many times.
A process cannot join itself because this would cause a deadlock. It is
an error to attempt to join a process before it has been started.
.. method:: get_name()
Return the process's name.
.. method:: set_name(name)
Set the process's name.
The name is a string used for identification purposes only. It has no
semantics. Multiple processes may be given the same name. The initial
name is set by the constructor.
.. method:: is_alive()
Return whether the process is alive.
Roughly, a process object is alive from the moment the :meth:`start`
method returns until the child process terminates.
.. method:: is_daemon()
Return the process's daemon flag.
.. method:: set_daemon(daemonic)
Set the process's daemon flag to the Boolean value *daemonic*. This must
be called before :meth:`start` is called.
The initial value is inherited from the creating process.
When a process exits, it attempts to terminate all of its daemonic child
processes.
Note that a daemonic process is not allowed to create child processes.
Otherwise a daemonic process would leave its children orphaned if it gets
terminated when its parent process exits.
In addition process objects also support the following methods:
.. method:: get_pid()
Return the process ID. Before the process is spawned, this will be
``None``.
.. method:: get_exit_code()
Return the child's exit code. This will be ``None`` if the process has
not yet terminated. A negative value *-N* indicates that the child was
terminated by signal *N*.
.. method:: get_auth_key()
Return the process's authentication key (a byte string).
When :mod:`multiprocessing` is initialized the main process is assigned a
random string using :func:`os.urandom`.
When a :class:`Process` object is created, it will inherit the
authentication key of its parent process, although this may be changed
using :meth:`set_auth_key` below.
See :ref:`multiprocessing-auth-keys`.
.. method:: set_auth_key(authkey)
Set the process's authentication key which must be a byte string.
.. method:: terminate()
Terminate the process. On Unix this is done using the ``SIGTERM`` signal,
on Windows ``TerminateProcess()`` is used. Note that exit handlers and
finally clauses etc will not be executed.
Note that descendant processes of the process will *not* be terminated --
they will simply become orphaned.
.. warning::
If this method is used when the associated process is using a pipe or
queue then the pipe or queue is liable to become corrupted and may
become unusable by other processes. Similarly, if the process has
acquired a lock or semaphore etc. then terminating it is liable to
cause other processes to deadlock.
Note that the :meth:`start`, :meth:`join`, :meth:`is_alive` and
:meth:`get_exit_code` methods should only be called by the process that
created the process object.
Example usage of some of the methods of :class:`Process`::
>>> import multiprocessing, time, signal
>>> p = multiprocessing.Process(target=time.sleep, args=(1000,))
>>> print p, p.is_alive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p, p.is_alive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.is_alive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.get_exit_code() == -signal.SIGTERM
True
.. exception:: BufferTooShort
Exception raised by :meth:`Connection.recv_bytes_into()` when the supplied
buffer object is too small for the message read.
If ``e`` is an instance of :exc:`BufferTooShort` then ``e.args[0]`` will give
the message as a byte string.
Pipes and Queues
~~~~~~~~~~~~~~~~
When using multiple processes, one generally uses message passing for
communication between processes and avoids having to use any synchronization
primitives like locks.
For passing messages one can use :func:`Pipe` (for a connection between two
processes) or a queue (which allows multiple producers and consumers).
The :class:`Queue` and :class:`JoinableQueue` types are multi-producer,
multi-consumer FIFO queues modelled on the :class:`Queue.Queue` class in the
standard library. They differ in that :class:`Queue` lacks the
:meth:`task_done` and :meth:`join` methods introduced into Python 2.5's
:class:`Queue.Queue` class.
If you use :class:`JoinableQueue` then you **must** call
:meth:`JoinableQueue.task_done` for each task removed from the queue or else the
semaphore used to count the number of unfinished tasks may eventually
overflow, raising an exception.
.. note::
:mod:`multiprocessing` uses the usual :exc:`Queue.Empty` and
:exc:`Queue.Full` exceptions to signal a timeout. They are not available in
the :mod:`multiprocessing` namespace so you need to import them from
:mod:`Queue`.
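   For example::

      from Queue import Empty, Full   # timeout exceptions used by multiprocessing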
.. warning::
If a process is killed using :meth:`Process.terminate` or :func:`os.kill`
while it is trying to use a :class:`Queue`, then the data in the queue is
likely to become corrupted. This may cause any other process to get an
exception when it tries to use the queue later on.
.. warning::
As mentioned above, if a child process has put items on a queue (and it has
not used :meth:`JoinableQueue.cancel_join_thread`), then that process will
not terminate until all buffered items have been flushed to the pipe.
This means that if you try joining that process you may get a deadlock unless
you are sure that all items which have been put on the queue have been
consumed. Similarly, if the child process is non-daemonic then the parent
process may hang on exit when it tries to join all its non-daemonic children.
Note that a queue created using a manager does not have this issue. See
:ref:`multiprocessing-programming`.
Note that one can also create a shared queue by using a manager object -- see
:ref:`multiprocessing-managers`.
For an example of the usage of queues for interprocess communication see
:ref:`multiprocessing-examples`.
.. function:: Pipe([duplex])
Returns a pair ``(conn1, conn2)`` of :class:`Connection` objects representing
the ends of a pipe.
If *duplex* is ``True`` (the default) then the pipe is bidirectional. If
*duplex* is ``False`` then the pipe is unidirectional: ``conn1`` can only be
used for receiving messages and ``conn2`` can only be used for sending
messages.
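For instance, a minimal sketch of a one-way pipe (illustrative, not from the
original reference text)::

   from multiprocessing import Pipe

   recv_conn, send_conn = Pipe(duplex=False)
   send_conn.send('ping')     # conn2 is the sending end
   print recv_conn.recv()     # conn1 is the receiving end; prints "ping"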
.. class:: Queue([maxsize])
Returns a process shared queue implemented using a pipe and a few
locks/semaphores. When a process first puts an item on the queue a feeder
thread is started which transfers objects from a buffer into the pipe.
The usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions from the
standard library's :mod:`Queue` module are raised to signal timeouts.
:class:`Queue` implements all the methods of :class:`Queue.Queue` except for
:meth:`task_done` and :meth:`join`.
.. method:: qsize()
Return the approximate size of the queue. Because of
multithreading/multiprocessing semantics, this number is not reliable.
Note that this may raise :exc:`NotImplementedError` on Unix platforms like
Mac OS X where ``sem_getvalue()`` is not implemented.
.. method:: empty()
Return ``True`` if the queue is empty, ``False`` otherwise. Because of
multithreading/multiprocessing semantics, this is not reliable.
.. method:: full()
Return ``True`` if the queue is full, ``False`` otherwise. Because of
multithreading/multiprocessing semantics, this is not reliable.
.. method:: put(item[, block[, timeout]])
Put *item* into the queue. If the optional argument *block* is ``True`` (the
default) and *timeout* is ``None`` (the default), block if necessary until
a free slot is available. If *timeout* is a positive number, it blocks at
most *timeout* seconds and raises the :exc:`Queue.Full` exception if no
free slot was available within that time. Otherwise (*block* is
``False``), put an item on the queue if a free slot is immediately
available, else raise the :exc:`Queue.Full` exception (*timeout* is
ignored in that case).
.. method:: put_nowait(item)
Equivalent to ``put(item, False)``.
.. method:: get([block[, timeout]])
Remove and return an item from the queue. If the optional argument *block*
is ``True`` (the default) and *timeout* is ``None`` (the default), block if
necessary until an item is available. If *timeout* is a positive number,
it blocks at most *timeout* seconds and raises the :exc:`Queue.Empty`
exception if no item was available within that time. Otherwise (*block* is
``False``), return an item if one is immediately available, else raise the
:exc:`Queue.Empty` exception (*timeout* is ignored in that case).
.. method:: get_nowait()
get_no_wait()
Equivalent to ``get(False)``.
:class:`multiprocessing.Queue` has a few additional methods not found in
:class:`Queue.Queue` which are usually unnecessary:
.. method:: close()
Indicate that no more data will be put on this queue by the current
process. The background thread will quit once it has flushed all buffered
data to the pipe. This is called automatically when the queue is garbage
collected.
.. method:: join_thread()
Join the background thread. This can only be used after :meth:`close` has
been called. It blocks until the background thread exits, ensuring that
all data in the buffer has been flushed to the pipe.
By default if a process is not the creator of the queue then on exit it
will attempt to join the queue's background thread. The process can call
:meth:`cancel_join_thread()` to make :meth:`join_thread()` do nothing.
.. method:: cancel_join_thread()
Prevent :meth:`join_thread` from blocking. In particular, this prevents
the background thread from being joined automatically when the process
exits -- see :meth:`join_thread()`.
.. class:: JoinableQueue([maxsize])
:class:`JoinableQueue`, a :class:`Queue` subclass, is a queue which
additionally has :meth:`task_done` and :meth:`join` methods.
.. method:: task_done()
Indicate that a formerly enqueued task is complete. Used by queue consumer
threads. For each :meth:`get` used to fetch a task, a subsequent call to
:meth:`task_done` tells the queue that the processing on the task is
complete.
If a :meth:`join` is currently blocking, it will resume when all items
have been processed (meaning that a :meth:`task_done` call was received
for every item that had been :meth:`put` into the queue).
Raises a :exc:`ValueError` if called more times than there were items
placed in the queue.
.. method:: join()
Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls
:meth:`task_done` to indicate that the item was retrieved and all work on
it is complete. When the count of unfinished tasks drops to zero,
:meth:`join` unblocks.
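As an illustrative sketch of the intended pattern (using a daemonic worker
process so that it exits with the main program)::

   from multiprocessing import Process, JoinableQueue

   def worker(q):
       while True:
           item = q.get()
           print 'processed', item
           q.task_done()          # one task_done() per item fetched by get()

   if __name__ == '__main__':
       q = JoinableQueue()
       p = Process(target=worker, args=(q,))
       p.set_daemon(True)         # daemonic: terminated when the main process exits
       p.start()
       for i in range(5):
           q.put(i)
       q.join()                   # blocks until every item has been task_done()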
Miscellaneous
~~~~~~~~~~~~~
.. function:: active_children()
Return list of all live children of the current process.
Calling this has the side effect of "joining" any processes which have
already finished.
.. function:: cpu_count()
Return the number of CPUs in the system. May raise
:exc:`NotImplementedError`.
.. function:: current_process()
Return the :class:`Process` object corresponding to the current process.
An analogue of :func:`threading.current_thread`.
.. function:: freeze_support()
Add support for when a program which uses :mod:`multiprocessing` has been
frozen to produce a Windows executable. (Has been tested with **py2exe**,
**PyInstaller** and **cx_Freeze**.)
One needs to call this function straight after the ``if __name__ ==
'__main__'`` line of the main module. For example::
from multiprocessing import Process, freeze_support
def f():
print 'hello world!'
if __name__ == '__main__':
freeze_support()
Process(target=f).start()
If the :func:`freeze_support()` line is omitted then trying to run the
frozen executable will raise :exc:`RuntimeError`.
If the module is being run normally by the Python interpreter then
:func:`freeze_support()` has no effect.
.. function:: set_executable()
Sets the path of the Python interpreter to use when starting a child process.
(By default `sys.executable` is used). Embedders will probably need to do
something like ::
set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
before they can create child processes. (Windows only)
.. note::
:mod:`multiprocessing` contains no analogues of
:func:`threading.active_count`, :func:`threading.enumerate`,
:func:`threading.settrace`, :func:`threading.setprofile`,
:class:`threading.Timer`, or :class:`threading.local`.
Connection Objects
~~~~~~~~~~~~~~~~~~
Connection objects allow the sending and receiving of picklable objects or
strings. They can be thought of as message oriented connected sockets.
Connection objects are usually created using :func:`Pipe` -- see also
:ref:`multiprocessing-listeners-clients`.
.. class:: Connection
.. method:: send(obj)
Send an object to the other end of the connection which should be read
using :meth:`recv`.
The object must be picklable.
.. method:: recv()
Return an object sent from the other end of the connection using
:meth:`send`. Raises :exc:`EOFError` if there is nothing left to receive
and the other end was closed.
.. method:: fileno()
Returns the file descriptor or handle used by the connection.
.. method:: close()
Close the connection.
This is called automatically when the connection is garbage collected.
.. method:: poll([timeout])
Return whether there is any data available to be read.
If *timeout* is not specified then it will return immediately. If
*timeout* is a number then this specifies the maximum time in seconds to
block. If *timeout* is ``None`` then an infinite timeout is used.
.. method:: send_bytes(buffer[, offset[, size]])
Send byte data from an object supporting the buffer interface as a
complete message.
If *offset* is given then data is read from that position in *buffer*. If
*size* is given then that many bytes will be read from *buffer*.
.. method:: recv_bytes([maxlength])
Return a complete message of byte data sent from the other end of the
connection as a string. Raises :exc:`EOFError` if there is nothing left
to receive and the other end has closed.
If *maxlength* is specified and the message is longer than *maxlength*
then :exc:`IOError` is raised and the connection will no longer be
readable.
.. method:: recv_bytes_into(buffer[, offset])
Read into *buffer* a complete message of byte data sent from the other end
of the connection and return the number of bytes in the message. Raises
:exc:`EOFError` if there is nothing left to receive and the other end was
closed.
*buffer* must be an object satisfying the writable buffer interface. If
*offset* is given then the message will be written into the buffer from
that position. *offset* must be a non-negative integer less than the
length of *buffer* (in bytes).
If the buffer is too short then a :exc:`BufferTooShort` exception is
raised and the complete message is available as ``e.args[0]`` where ``e``
is the exception instance.
For example:
>>> from multiprocessing import Pipe
>>> a, b = Pipe()
>>> a.send([1, 'hello', None])
>>> b.recv()
[1, 'hello', None]
>>> b.send_bytes('thank you')
>>> a.recv_bytes()
'thank you'
>>> import array
>>> arr1 = array.array('i', range(5))
>>> arr2 = array.array('i', [0] * 10)
>>> a.send_bytes(arr1)
>>> count = b.recv_bytes_into(arr2)
>>> assert count == len(arr1) * arr1.itemsize
>>> arr2
array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
.. warning::
The :meth:`Connection.recv` method automatically unpickles the data it
receives, which can be a security risk unless you can trust the process
which sent the message.
Therefore, unless the connection object was produced using :func:`Pipe` you
should only use the :meth:`recv` and :meth:`send` methods after performing
some sort of authentication. See :ref:`multiprocessing-auth-keys`.
.. warning::
If a process is killed while it is trying to read or write to a pipe then
the data in the pipe is likely to become corrupted, because it may become
impossible to be sure where the message boundaries lie.
Synchronization primitives
~~~~~~~~~~~~~~~~~~~~~~~~~~
Generally synchronization primitives are not as necessary in a multiprocess
program as they are in a multithreaded program.  See the documentation for
the standard library's :mod:`threading` module.
Note that one can also create synchronization primitives by using a manager
object -- see :ref:`multiprocessing-managers`.
.. class:: BoundedSemaphore([value])
A bounded semaphore object: a clone of :class:`threading.BoundedSemaphore`.
(On Mac OS X this is indistinguishable from :class:`Semaphore` because
``sem_getvalue()`` is not implemented on that platform).
.. class:: Condition([lock])
A condition variable: a clone of :class:`threading.Condition`.
If *lock* is specified then it should be a :class:`Lock` or :class:`RLock`
object from :mod:`multiprocessing`.
.. class:: Event()
A clone of :class:`threading.Event`.
.. class:: Lock()
A non-recursive lock object: a clone of :class:`threading.Lock`.
.. class:: RLock()
A recursive lock object: a clone of :class:`threading.RLock`.
.. class:: Semaphore([value])
A semaphore object: a clone of :class:`threading.Semaphore`.
.. note::
The :meth:`acquire()` method of :class:`BoundedSemaphore`, :class:`Lock`,
:class:`RLock` and :class:`Semaphore` has a timeout parameter not supported
by the equivalents in :mod:`threading`. The signature is
``acquire(block=True, timeout=None)`` with keyword parameters being
acceptable. If *block* is ``True`` and *timeout* is not ``None`` then it
specifies a timeout in seconds. If *block* is ``False`` then *timeout* is
ignored.
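As an illustrative sketch of this extended signature (the timeout value is
arbitrary)::

    from multiprocessing import Lock

    lock = Lock()
    # block for at most half a second; acquire() returns False on timeout
    if lock.acquire(block=True, timeout=0.5):
        try:
            pass    # ... access the shared resource here ...
        finally:
            lock.release()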
.. note::
If the SIGINT signal generated by Ctrl-C arrives while the main thread is
blocked by a call to :meth:`BoundedSemaphore.acquire`, :meth:`Lock.acquire`,
:meth:`RLock.acquire`, :meth:`Semaphore.acquire`, :meth:`Condition.acquire`
or :meth:`Condition.wait` then the call will be immediately interrupted and
:exc:`KeyboardInterrupt` will be raised.
This differs from the behaviour of :mod:`threading` where SIGINT will be
ignored while the equivalent blocking calls are in progress.
Shared :mod:`ctypes` Objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to create shared objects using shared memory which can be
inherited by child processes.
.. function:: Value(typecode_or_type[, lock[, *args]])
Return a :mod:`ctypes` object allocated from shared memory. By default the
return value is actually a synchronized wrapper for the object.
*typecode_or_type* determines the type of the returned object: it is either a
ctypes type or a one character typecode of the kind used by the :mod:`array`
module. *\*args* is passed on to the constructor for the type.
If *lock* is ``True`` (the default) then a new lock object is created to
synchronize access to the value. If *lock* is a :class:`Lock` or
:class:`RLock` object then that will be used to synchronize access to the
value. If *lock* is ``False`` then access to the returned object will not be
automatically protected by a lock, so it will not necessarily be
"process-safe".
Note that *lock* is a keyword-only argument.
.. function:: Array(typecode_or_type, size_or_initializer, *, lock=True)
Return a ctypes array allocated from shared memory. By default the return
value is actually a synchronized wrapper for the array.
*typecode_or_type* determines the type of the elements of the returned array:
it is either a ctypes type or a one character typecode of the kind used by
the :mod:`array` module. If *size_or_initializer* is an integer, then it
determines the length of the array, and the array will be initially zeroed.
Otherwise, *size_or_initializer* is a sequence which is used to initialize
the array and whose length determines the length of the array.
If *lock* is ``True`` (the default) then a new lock object is created to
synchronize access to the value. If *lock* is a :class:`Lock` or
:class:`RLock` object then that will be used to synchronize access to the
value. If *lock* is ``False`` then access to the returned object will not be
automatically protected by a lock, so it will not necessarily be
"process-safe".
Note that *lock* is a keyword-only argument.
Note that an array of :data:`ctypes.c_char` has ``value`` and ``raw``
attributes which allow one to use it to store and retrieve strings.
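For instance, a small sketch (the string is arbitrary):

>>> from multiprocessing import Array
>>> s = Array('c', 'hello')    # array of c_char initialized from a string
>>> s.value
'hello'
>>> s.raw
'hello'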
The :mod:`multiprocessing.sharedctypes` module
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
.. module:: multiprocessing.sharedctypes
:synopsis: Allocate ctypes objects from shared memory.
The :mod:`multiprocessing.sharedctypes` module provides functions for allocating
:mod:`ctypes` objects from shared memory which can be inherited by child
processes.
.. note::
Although it is possible to store a pointer in shared memory, remember that
it will refer to a location in the address space of a specific process.
The pointer is quite likely to be invalid in the context of a second
process, and trying to dereference it from the second process may cause a
crash.
.. function:: RawArray(typecode_or_type, size_or_initializer)
Return a ctypes array allocated from shared memory.
*typecode_or_type* determines the type of the elements of the returned array:
it is either a ctypes type or a one character typecode of the kind used by
the :mod:`array` module. If *size_or_initializer* is an integer then it
determines the length of the array, and the array will be initially zeroed.
Otherwise *size_or_initializer* is a sequence which is used to initialize the
array and whose length determines the length of the array.
Note that setting and getting an element is potentially non-atomic -- use
:func:`Array` instead to make sure that access is automatically synchronized
using a lock.
.. function:: RawValue(typecode_or_type, *args)
Return a ctypes object allocated from shared memory.
*typecode_or_type* determines the type of the returned object: it is either a
ctypes type or a one character typecode of the kind used by the :mod:`array`
module.  *\*args* is passed on to the constructor for the type.
Note that setting and getting the value is potentially non-atomic -- use
:func:`Value` instead to make sure that access is automatically synchronized
using a lock.
Note that an array of :data:`ctypes.c_char` has ``value`` and ``raw``
attributes which allow one to use it to store and retrieve strings -- see
documentation for :mod:`ctypes`.
.. function:: Array(typecode_or_type, size_or_initializer[, lock[, *args]])
The same as :func:`RawArray` except that depending on the value of *lock* a
process-safe synchronization wrapper may be returned instead of a raw ctypes
array.
If *lock* is ``True`` (the default) then a new lock object is created to
synchronize access to the value. If *lock* is a :class:`Lock` or
:class:`RLock` object then that will be used to synchronize access to the
value. If *lock* is ``False`` then access to the returned object will not be
automatically protected by a lock, so it will not necessarily be
"process-safe".
Note that *lock* is a keyword-only argument.
.. function:: Value(typecode_or_type, *args[, lock])
The same as :func:`RawValue` except that depending on the value of *lock* a
process-safe synchronization wrapper may be returned instead of a raw ctypes
object.
If *lock* is ``True`` (the default) then a new lock object is created to
synchronize access to the value. If *lock* is a :class:`Lock` or
:class:`RLock` object then that will be used to synchronize access to the
value. If *lock* is ``False`` then access to the returned object will not be
automatically protected by a lock, so it will not necessarily be
"process-safe".
Note that *lock* is a keyword-only argument.
.. function:: copy(obj)
Return a ctypes object allocated from shared memory which is a copy of the
ctypes object *obj*.
.. function:: synchronized(obj[, lock])
Return a process-safe wrapper object for a ctypes object which uses *lock* to
synchronize access. If *lock* is ``None`` (the default) then a
:class:`multiprocessing.RLock` object is created automatically.
A synchronized wrapper will have two methods in addition to those of the
object it wraps: :meth:`get_obj()` returns the wrapped object and
:meth:`get_lock()` returns the lock object used for synchronization.
Note that accessing the ctypes object through the wrapper can be a lot slower
than accessing the raw ctypes object.
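A minimal sketch of wrapping a raw object by hand (relying on the default
:class:`multiprocessing.RLock`)::

    from multiprocessing.sharedctypes import RawValue, synchronized

    raw = RawValue('i', 0)
    sync = synchronized(raw)        # an RLock is created automatically
    sync.get_lock().acquire()
    try:
        sync.get_obj().value += 1   # safe while the wrapper's lock is held
    finally:
        sync.get_lock().release()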
The table below compares the syntax for creating shared ctypes objects from
shared memory with the normal ctypes syntax. (In the table ``MyStruct`` is some
subclass of :class:`ctypes.Structure`.)
==================== ========================== ===========================
ctypes sharedctypes using type sharedctypes using typecode
==================== ========================== ===========================
c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4)
MyStruct(4, 6) RawValue(MyStruct, 4, 6)
(c_short * 7)() RawArray(c_short, 7) RawArray('h', 7)
(c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8))
==================== ========================== ===========================
Below is an example where a number of ctypes objects are modified by a child
process::
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
from ctypes import Structure, c_double
class Point(Structure):
_fields_ = [('x', c_double), ('y', c_double)]
def modify(n, x, s, A):
n.value **= 2
x.value **= 2
s.value = s.value.upper()
for a in A:
a.x **= 2
a.y **= 2
if __name__ == '__main__':
lock = Lock()
n = Value('i', 7)
x = Value(c_double, 1.0/3.0, lock=False)
s = Array('c', 'hello world', lock=lock)
A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock)
p = Process(target=modify, args=(n, x, s, A))
p.start()
p.join()
print n.value
print x.value
print s.value
print [(a.x, a.y) for a in A]
.. highlightlang:: none
The results printed are ::
49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]
.. highlightlang:: python
.. _multiprocessing-managers:
Managers
~~~~~~~~
Managers provide a way to create data which can be shared between different
processes. A manager object controls a server process which manages *shared
objects*. Other processes can access the shared objects by using proxies.
.. function:: multiprocessing.Manager()
Returns a started :class:`SyncManager` object which can be used for sharing
objects between processes. The returned manager object corresponds to a
spawned child process and has methods which will create shared objects and
return corresponding proxies.
.. module:: multiprocessing.managers
:synopsis: Share data between processes with shared objects.
Manager processes will be shutdown as soon as they are garbage collected or
their parent process exits. The manager classes are defined in the
:mod:`multiprocessing.managers` module:
.. class:: BaseManager([address[, authkey]])
Create a BaseManager object.
Once created one should call :meth:`start` or :meth:`serve_forever` to ensure
that the manager object refers to a started manager process.
*address* is the address on which the manager process listens for new
connections. If *address* is ``None`` then an arbitrary one is chosen.
*authkey* is the authentication key which will be used to check the validity
of incoming connections to the server process.  If *authkey* is ``None`` then
``current_process().get_auth_key()`` is used.  Otherwise *authkey* is used
and it must be a string.
.. method:: start()
Start a subprocess which runs the manager.
.. method:: serve_forever()
Run the server in the current process.
.. method:: from_address(address, authkey)
A class method which creates a manager object referring to a pre-existing
server process which is using the given address and authentication key.
.. method:: shutdown()
Stop the process used by the manager.  This is only available if
:meth:`start` has been used to start the server process.
This can be called multiple times.
.. method:: register(typeid[, callable[, proxytype[, exposed[, method_to_typeid[, create_method]]]]])
A classmethod which can be used for registering a type or callable with
the manager class.
*typeid* is a "type identifier" which is used to identify a particular
type of shared object. This must be a string.
*callable* is a callable used for creating objects for this type
identifier. If a manager instance will be created using the
:meth:`from_address()` classmethod or if the *create_method* argument is
``False`` then this can be left as ``None``.
*proxytype* is a subclass of :class:`multiprocessing.managers.BaseProxy`
which is used to create proxies for shared objects with this *typeid*. If
``None`` then a proxy class is created automatically.
*exposed* is used to specify a sequence of method names which proxies for
this typeid should be allowed to access using
:meth:`BaseProxy._call_method`.  (If *exposed* is ``None`` then
:attr:`proxytype._exposed_` is used instead if it exists.) In the case
where no exposed list is specified, all "public methods" of the shared
object will be accessible. (Here a "public method" means any attribute
which has a ``__call__()`` method and whose name does not begin with
``'_'``.)
*method_to_typeid* is a mapping used to specify the return type of those
exposed methods which should return a proxy. It maps method names to
typeid strings. (If *method_to_typeid* is ``None`` then
:attr:`proxytype._method_to_typeid_` is used instead if it exists.) If a
method's name is not a key of this mapping or if the mapping is ``None``
then the object returned by the method will be copied by value.
*create_method* determines whether a method should be created with name
*typeid* which can be used to tell the server process to create a new
shared object and return a proxy for it. By default it is ``True``.
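For instance, a sketch of registering a type with a restricted set of
exposed methods (the ``Counter`` class is purely illustrative)::

    from multiprocessing.managers import BaseManager

    class Counter(object):
        def __init__(self):
            self._value = 0
        def increment(self):
            self._value += 1
            return self._value

    class CounterManager(BaseManager):
        pass

    # proxies for 'Counter' may only call increment()
    CounterManager.register('Counter', Counter, exposed=('increment',))

    if __name__ == '__main__':
        manager = CounterManager()
        manager.start()
        counter = manager.Counter()
        print counter.increment()   # prints 1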
:class:`BaseManager` instances also have one read-only property:
.. attribute:: address
The address used by the manager.
.. class:: SyncManager
A subclass of :class:`BaseManager` which can be used for the synchronization
of processes. Objects of this type are returned by
:func:`multiprocessing.Manager()`.
It also supports creation of shared lists and dictionaries.
.. method:: BoundedSemaphore([value])
Create a shared :class:`threading.BoundedSemaphore` object and return a
proxy for it.
.. method:: Condition([lock])
Create a shared :class:`threading.Condition` object and return a proxy for
it.
If *lock* is supplied then it should be a proxy for a
:class:`threading.Lock` or :class:`threading.RLock` object.
.. method:: Event()
Create a shared :class:`threading.Event` object and return a proxy for it.
.. method:: Lock()
Create a shared :class:`threading.Lock` object and return a proxy for it.
.. method:: Namespace()
Create a shared :class:`Namespace` object and return a proxy for it.
.. method:: Queue([maxsize])
Create a shared :class:`Queue.Queue` object and return a proxy for it.
.. method:: RLock()
Create a shared :class:`threading.RLock` object and return a proxy for it.
.. method:: Semaphore([value])
Create a shared :class:`threading.Semaphore` object and return a proxy for
it.
.. method:: Array(typecode, sequence)
Create an array and return a proxy for it.
.. method:: Value(typecode, value)
Create an object with a writable ``value`` attribute and return a proxy
for it.
.. method:: dict()
dict(mapping)
dict(sequence)
Create a shared ``dict`` object and return a proxy for it.
.. method:: list()
list(sequence)
Create a shared ``list`` object and return a proxy for it.
Namespace objects
>>>>>>>>>>>>>>>>>
A namespace object has no public methods, but does have writable attributes.
Its representation shows the values of its attributes.
However, when using a proxy for a namespace object, an attribute beginning with
``'_'`` will be an attribute of the proxy and not an attribute of the referent::
>>> manager = multiprocessing.Manager()
>>> Global = manager.Namespace()
>>> Global.x = 10
>>> Global.y = 'hello'
>>> Global._z = 12.3 # this is an attribute of the proxy
>>> print Global
Namespace(x=10, y='hello')
Customized managers
>>>>>>>>>>>>>>>>>>>
To create one's own manager, one creates a subclass of :class:`BaseManager`
and uses the :meth:`register` classmethod to register new types or callables
with the manager class.  For example::
from multiprocessing.managers import BaseManager
class MathsClass(object):
def add(self, x, y):
return x + y
def mul(self, x, y):
return x * y
class MyManager(BaseManager):
pass
MyManager.register('Maths', MathsClass)
if __name__ == '__main__':
manager = MyManager()
manager.start()
maths = manager.Maths()
print maths.add(4, 3) # prints 7
print maths.mul(7, 8) # prints 56
Using a remote manager
>>>>>>>>>>>>>>>>>>>>>>
It is possible to run a manager server on one machine and have clients use it
from other machines (assuming that the firewalls involved allow it).
Running the following commands creates a server for a single shared queue which
remote clients can access::
>>> from multiprocessing.managers import BaseManager
>>> import Queue
>>> queue = Queue.Queue()
>>> class QueueManager(BaseManager): pass
...
>>> QueueManager.register('getQueue', callable=lambda:queue)
>>> m = QueueManager(address=('', 50000), authkey='abracadabra')
>>> m.serve_forever()
One client can access the server as follows::
>>> from multiprocessing.managers import BaseManager
>>> class QueueManager(BaseManager): pass
...
>>> QueueManager.register('getQueue')
>>> m = QueueManager.from_address(address=('foo.bar.org', 50000),
...                               authkey='abracadabra')
>>> queue = m.getQueue()
>>> queue.put('hello')
Another client can also use it::
>>> from multiprocessing.managers import BaseManager
>>> class QueueManager(BaseManager): pass
...
>>> QueueManager.register('getQueue')
>>> m = QueueManager.from_address(address=('foo.bar.org', 50000), authkey='abracadabra')
>>> queue = m.getQueue()
>>> queue.get()
'hello'
Proxy Objects
~~~~~~~~~~~~~
A proxy is an object which *refers* to a shared object which lives (presumably)
in a different process. The shared object is said to be the *referent* of the
proxy. Multiple proxy objects may have the same referent.
A proxy object has methods which invoke corresponding methods of its referent
(although not every method of the referent will necessarily be available through
the proxy). A proxy can usually be used in most of the same ways that its
referent can::
>>> from multiprocessing import Manager
>>> manager = Manager()
>>> l = manager.list([i*i for i in range(10)])
>>> print l
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> print repr(l)
<ListProxy object, typeid 'list' at 0xb799974c>
>>> l[4]
16
>>> l[2:5]
[4, 9, 16]
Notice that applying :func:`str` to a proxy will return the representation of
the referent, whereas applying :func:`repr` will return the representation of
the proxy.
An important feature of proxy objects is that they are picklable so they can be
passed between processes. Note, however, that if a proxy is sent to the
corresponding manager's process then unpickling it will produce the referent
itself. This means, for example, that one shared object can contain a second::
>>> a = manager.list()
>>> b = manager.list()
>>> a.append(b) # referent of `a` now contains referent of `b`
>>> print a, b
[[]] []
>>> b.append('hello')
>>> print a, b
[['hello']] ['hello']
.. note::
The proxy types in :mod:`multiprocessing` do nothing to support comparisons
by value. So, for instance, ::
manager.list([1,2,3]) == [1,2,3]
will return ``False``. One should just use a copy of the referent instead
when making comparisons.
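For instance (an illustrative sketch, continuing the session above):

>>> l = manager.list([1, 2, 3])
>>> l == [1, 2, 3]         # compares the proxy object itself
False
>>> list(l) == [1, 2, 3]   # compare a copy of the referent instead
True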
.. class:: BaseProxy
Proxy objects are instances of subclasses of :class:`BaseProxy`.
.. method:: _call_method(methodname[, args[, kwds]])
Call and return the result of a method of the proxy's referent.
If ``proxy`` is a proxy whose referent is ``obj`` then the expression ::
proxy._call_method(methodname, args, kwds)
will evaluate the expression ::
getattr(obj, methodname)(*args, **kwds)
in the manager's process.
The returned value will be a copy of the result of the call or a proxy to
a new shared object -- see documentation for the *method_to_typeid*
argument of :meth:`BaseManager.register`.
If an exception is raised by the call, then it is re-raised by
:meth:`_call_method`.  If some other exception is raised in the manager's
process then this is converted into a :exc:`RemoteError` exception and is
raised by :meth:`_call_method`.
Note in particular that an exception will be raised if *methodname* has
not been *exposed*.
An example of the usage of :meth:`_call_method()`::
>>> l = manager.list(range(10))
>>> l._call_method('__len__')
10
>>> l._call_method('__getslice__', (2, 7)) # equiv to `l[2:7]`
[2, 3, 4, 5, 6]
>>> l._call_method('__getitem__', (20,)) # equiv to `l[20]`
Traceback (most recent call last):
...
IndexError: list index out of range
.. method:: _get_value()
Return a copy of the referent.
If the referent is unpicklable then this will raise an exception.
.. method:: __repr__
Return a representation of the proxy object.
.. method:: __str__
Return the representation of the referent.
Cleanup
>>>>>>>
A proxy object uses a weakref callback so that when it gets garbage collected it
deregisters itself from the manager which owns its referent.
A shared object gets deleted from the manager process when there are no longer
any proxies referring to it.
Process Pools
~~~~~~~~~~~~~
.. module:: multiprocessing.pool
:synopsis: Create pools of processes.
One can create a pool of processes which will carry out tasks submitted to it
with the :class:`Pool` class in :mod:`multiprocessing.pool`.
.. class:: multiprocessing.Pool([processes[, initializer[, initargs]]])
A process pool object which controls a pool of worker processes to which jobs
can be submitted. It supports asynchronous results with timeouts and
callbacks and has a parallel map implementation.
*processes* is the number of worker processes to use. If *processes* is
``None`` then the number returned by :func:`cpu_count` is used. If
*initializer* is not ``None`` then each worker process will call
``initializer(*initargs)`` when it starts.
.. method:: apply(func[, args[, kwds]])
Equivalent of the :func:`apply` builtin function.  It blocks until the
result is ready.
.. method:: apply_async(func[, args[, kwds[, callback]]])
A variant of the :meth:`apply` method which returns a result object.
If *callback* is specified then it should be a callable which accepts a
single argument. When the result becomes ready *callback* is applied to
it (unless the call failed). *callback* should complete immediately since
otherwise the thread which handles the results will get blocked.
.. method:: map(func, iterable[, chunksize])
A parallel equivalent of the :func:`map` builtin function.  It blocks until
the result is ready.
This method chops the iterable into a number of chunks which it submits to
the process pool as separate tasks. The (approximate) size of these
chunks can be specified by setting *chunksize* to a positive integer.
.. method:: map_async(func, iterable[, chunksize[, callback]])
A variant of the :meth:`.map` method which returns a result object.
If *callback* is specified then it should be a callable which accepts a
single argument. When the result becomes ready *callback* is applied to
it (unless the call failed). *callback* should complete immediately since
otherwise the thread which handles the results will get blocked.
.. method:: imap(func, iterable[, chunksize])
An equivalent of :func:`itertools.imap`.
The *chunksize* argument is the same as the one used by the :meth:`.map`
method. For very long iterables using a large value for *chunksize* can
make the job complete **much** faster than using the default value of
``1``.
Also if *chunksize* is ``1`` then the :meth:`next` method of the iterator
returned by the :meth:`imap` method has an optional *timeout* parameter:
``next(timeout)`` will raise :exc:`multiprocessing.TimeoutError` if the
result cannot be returned within *timeout* seconds.
.. method:: imap_unordered(func, iterable[, chunksize])
The same as :meth:`imap` except that the ordering of the results from the
returned iterator should be considered arbitrary. (Only when there is
only one worker process is the order guaranteed to be "correct".)
.. method:: close()
Prevents any more tasks from being submitted to the pool. Once all the
tasks have been completed the worker processes will exit.
.. method:: terminate()
Stops the worker processes immediately without completing outstanding
work. When the pool object is garbage collected :meth:`terminate` will be
called immediately.
.. method:: join()
Wait for the worker processes to exit. One must call :meth:`close` or
:meth:`terminate` before using :meth:`join`.
.. class:: AsyncResult
The class of the result returned by :meth:`Pool.apply_async` and
:meth:`Pool.map_async`.
.. method:: get([timeout])
Return the result when it arrives. If *timeout* is not ``None`` and the
result does not arrive within *timeout* seconds then
:exc:`multiprocessing.TimeoutError` is raised. If the remote call raised
an exception then that exception will be reraised by :meth:`get`.
.. method:: wait([timeout])
Wait until the result is available or until *timeout* seconds pass.
.. method:: ready()
Return whether the call has completed.
.. method:: successful()
Return whether the call completed without raising an exception. Will
raise :exc:`AssertionError` if the result is not ready.
The following example demonstrates the use of a pool::
from multiprocessing import Pool
def f(x):
return x*x
if __name__ == '__main__':
pool = Pool(processes=4) # start 4 worker processes
result = pool.apply_async(f, (10,)) # evaluate "f(10)" asynchronously
print result.get(timeout=1) # prints "100" unless your computer is *very* slow
print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]"
it = pool.imap(f, range(10))
print it.next() # prints "0"
print it.next() # prints "1"
print it.next(timeout=1) # prints "4" unless your computer is *very* slow
import time
result = pool.apply_async(time.sleep, (10,))
print result.get(timeout=1) # raises TimeoutError
.. _multiprocessing-listeners-clients:
Listeners and Clients
~~~~~~~~~~~~~~~~~~~~~
.. module:: multiprocessing.connection
:synopsis: API for dealing with sockets.
Usually message passing between processes is done using queues or by using
:class:`Connection` objects returned by :func:`Pipe`.
However, the :mod:`multiprocessing.connection` module allows some extra
flexibility. It basically gives a high level message oriented API for dealing
with sockets or Windows named pipes, and also has support for *digest
authentication* using the :mod:`hmac` module from the standard library.
.. function:: deliver_challenge(connection, authkey)
Send a randomly generated message to the other end of the connection and wait
for a reply.
If the reply matches the digest of the message using *authkey* as the key
then a welcome message is sent to the other end of the connection. Otherwise
:exc:`AuthenticationError` is raised.
.. function:: answer_challenge(connection, authkey)
Receive a message, calculate the digest of the message using *authkey* as the
key, and then send the digest back.
If a welcome message is not received, then :exc:`AuthenticationError` is
raised.
.. function:: Client(address[, family[, authenticate[, authkey]]])
Attempt to set up a connection to the listener which is using address
*address*, returning a :class:`Connection`.
The type of the connection is determined by the *family* argument, but this
can generally be omitted since it can usually be inferred from the format of
*address*. (See :ref:`multiprocessing-address-formats`.)
If *authenticate* is ``True`` or *authkey* is a string then digest
authentication is used.  The key used for authentication will be either
*authkey* or ``current_process().get_auth_key()`` if *authkey* is ``None``.
If authentication fails then :exc:`AuthenticationError` is raised. See
:ref:`multiprocessing-auth-keys`.
.. class:: Listener([address[, family[, backlog[, authenticate[, authkey]]]]])
A wrapper for a bound socket or Windows named pipe which is 'listening' for
connections.
*address* is the address to be used by the bound socket or named pipe of the
listener object.
*family* is the type of socket (or named pipe) to use. This can be one of
the strings ``'AF_INET'`` (for a TCP socket), ``'AF_UNIX'`` (for a Unix
domain socket) or ``'AF_PIPE'`` (for a Windows named pipe). Of these only
the first is guaranteed to be available. If *family* is ``None`` then the
family is inferred from the format of *address*. If *address* is also
``None`` then a default is chosen. This default is the family which is
assumed to be the fastest available. See
:ref:`multiprocessing-address-formats`. Note that if *family* is
``'AF_UNIX'`` and address is ``None`` then the socket will be created in a
private temporary directory created using :func:`tempfile.mkstemp`.
If the listener object uses a socket then *backlog* (1 by default) is passed
to the :meth:`listen` method of the socket once it has been bound.
If *authenticate* is ``True`` (``False`` by default) or *authkey* is not
``None`` then digest authentication is used.
If *authkey* is a string then it will be used as the authentication key;
otherwise it must be ``None``.
If *authkey* is ``None`` and *authenticate* is ``True`` then
``current_process().get_auth_key()`` is used as the authentication key.  If
*authkey* is ``None`` and *authenticate* is ``False`` then no
authentication is done.  If authentication fails then
:exc:`AuthenticationError` is raised.  See :ref:`multiprocessing-auth-keys`.
.. method:: accept()
Accept a connection on the bound socket or named pipe of the listener
object and return a :class:`Connection` object. If authentication is
attempted and fails, then :exc:`AuthenticationError` is raised.
.. method:: close()
Close the bound socket or named pipe of the listener object. This is
called automatically when the listener is garbage collected. However it
is advisable to call it explicitly.
Listener objects have the following read-only properties:
.. attribute:: address
The address which is being used by the Listener object.
.. attribute:: last_accepted
The address from which the last accepted connection came. If this is
unavailable then it is ``None``.
The module defines two exceptions:
.. exception:: AuthenticationError
Exception raised when there is an authentication error.
.. exception:: BufferTooShort
Exception raised by the :meth:`Connection.recv_bytes_into` method of a
connection object when the supplied buffer object is too small for the
message read.
If *e* is an instance of :exc:`BufferTooShort` then ``e.args[0]`` will give
the message as a byte string.
**Examples**
The following server code creates a listener which uses ``'secret password'`` as
an authentication key. It then waits for a connection and sends some data to
the client::
from multiprocessing.connection import Listener
from array import array
address = ('localhost', 6000) # family is deduced to be 'AF_INET'
listener = Listener(address, authkey='secret password')
conn = listener.accept()
print 'connection accepted from', listener.last_accepted
conn.send([2.25, None, 'junk', float])
conn.send_bytes('hello')
conn.send_bytes(array('i', [42, 1729]))
conn.close()
listener.close()
The following code connects to the server and receives some data from the
server::
from multiprocessing.connection import Client
from array import array
address = ('localhost', 6000)
conn = Client(address, authkey='secret password')
print conn.recv() # => [2.25, None, 'junk', float]
print conn.recv_bytes() # => 'hello'
arr = array('i', [0, 0, 0, 0, 0])
print conn.recv_bytes_into(arr) # => 8
print arr # => array('i', [42, 1729, 0, 0, 0])
conn.close()
.. _multiprocessing-address-formats:
Address Formats
>>>>>>>>>>>>>>>
* An ``'AF_INET'`` address is a tuple of the form ``(hostname, port)`` where
  *hostname* is a string and *port* is an integer.
* An ``'AF_UNIX'`` address is a string representing a filename on the
  filesystem.
* An ``'AF_PIPE'`` address is a string of the form
  ``r'\\.\pipe\PipeName'``.  To use :func:`Client` to connect to a named
  pipe on a remote computer called *ServerName* one should use an address of
  the form ``r'\\ServerName\pipe\PipeName'`` instead.
Note that any string beginning with two backslashes is assumed by default to be
an ``'AF_PIPE'`` address rather than an ``'AF_UNIX'`` address.
.. _multiprocessing-auth-keys:
Authentication keys
~~~~~~~~~~~~~~~~~~~
When one uses :meth:`Connection.recv`, the data received is automatically
unpickled. Unfortunately unpickling data from an untrusted source is a security
risk. Therefore :class:`Listener` and :func:`Client` use the :mod:`hmac` module
to provide digest authentication.
An authentication key is a string which can be thought of as a password: once a
connection is established both ends will demand proof that the other knows the
authentication key. (Demonstrating that both ends are using the same key does
**not** involve sending the key over the connection.)
If authentication is requested but no authentication key is specified then
the return value of ``current_process().get_auth_key()`` is used (see
:class:`Process`).  This value will automatically be inherited by any
:class:`Process` object that the current process creates.  This means that
(by default) all processes of a multi-process program will share a single
authentication key which can be used when setting up connections between
themselves.
Suitable authentication keys can also be generated by using :func:`os.urandom`.
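For example, a sketch of generating a fresh key and handing it to a
listener (the address and key length are illustrative; a client must be
given the same key by some other means)::

    import os
    from multiprocessing.connection import Listener

    key = os.urandom(16)       # 16 random bytes used as the shared secret
    listener = Listener(('localhost', 6000), authkey=key)
    conn = listener.accept()   # raises AuthenticationError on a bad key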
Logging
~~~~~~~
Some support for logging is available. Note, however, that the :mod:`logging`
package does not use process shared locks so it is possible (depending on the
handler type) for messages from different processes to get mixed up.
.. currentmodule:: multiprocessing
.. function:: get_logger()
Returns the logger used by :mod:`multiprocessing`. If necessary, a new one
will be created.
When first created the logger has level :data:`logging.NOTSET` and has a
handler which sends output to :data:`sys.stderr` using format
``'[%(levelname)s/%(processName)s] %(message)s'``.  (The logger allows use of
the non-standard ``'%(processName)s'`` format.)  Messages sent to this logger
will not by default propagate to the root logger.
Note that on Windows child processes will only inherit the level of the
parent process's logger -- any other customization of the logger will not be
inherited.
Below is an example session with logging turned on::
>>> import multiprocessing, logging
>>> logger = multiprocessing.get_logger()
>>> logger.setLevel(logging.INFO)
>>> logger.warning('doomed')
[WARNING/MainProcess] doomed
>>> m = multiprocessing.Manager()
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager exiting with exitcode 0
The :mod:`multiprocessing.dummy` module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. module:: multiprocessing.dummy
:synopsis: Dumb wrapper around threading.
:mod:`multiprocessing.dummy` replicates the API of :mod:`multiprocessing` but
is no more than a wrapper around the :mod:`threading` module.
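A minimal sketch, using the thread-backed :class:`Pool` as a drop-in
replacement::

    from multiprocessing.dummy import Pool    # same API, but worker threads

    def f(x):
        return x*x

    pool = Pool(4)                 # four threads, so no pickling is involved
    print pool.map(f, range(10))   # prints [0, 1, 4, ..., 81]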
.. _multiprocessing-programming:
Programming guidelines
----------------------
There are certain guidelines and idioms which should be adhered to when using
:mod:`multiprocessing`.
All platforms
~~~~~~~~~~~~~
Avoid shared state
As far as possible one should try to avoid shifting large amounts of data
between processes.
It is probably best to stick to using queues or pipes for communication
between processes rather than using the lower level synchronization
primitives from the :mod:`threading` module.
Picklability
Ensure that the arguments to the methods of proxies are picklable.
Thread safety of proxies
Do not use a proxy object from more than one thread unless you protect it
with a lock.
(There is never a problem with different processes using the *same* proxy.)
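An illustrative sketch of guarding one shared proxy (names are
hypothetical)::

    import threading

    proxy_lock = threading.Lock()

    def worker(shared_list):
        # every use of the proxy from this thread goes through the lock
        proxy_lock.acquire()
        try:
            shared_list.append('item')
        finally:
            proxy_lock.release()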
Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie.
There should never be very many because each time a new process starts (or
:func:`active_children` is called) all completed processes which have not
yet been joined will be joined. Also calling a finished process's
:meth:`Process.is_alive` will join the process. Even so it is probably good
practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
On Windows many types from :mod:`multiprocessing` need to be picklable so
that child processes can use them.  However, one should generally avoid
sending shared objects to other processes using pipes or queues.  Instead
you should arrange the program so that a process which needs access to a
shared resource created elsewhere can inherit it from an ancestor process.
Avoid terminating processes
Using the :meth:`Process.terminate` method to stop a process is liable to
cause any shared resources (such as locks, semaphores, pipes and queues)
currently being used by the process to become broken or unavailable to other
processes.
Therefore it is probably best to only consider using
:meth:`Process.terminate()` on processes which never use any shared
resources.
Joining processes that use queues
Bear in mind that a process that has put items in a queue will wait before
terminating until all the buffered items are fed by the "feeder" thread to
the underlying pipe.  (The child process can call the
:meth:`Queue.cancel_join_thread` method of the queue to avoid this behaviour.)
This means that whenever you use a queue you need to make sure that all
items which have been put on the queue will eventually be removed before the
process is joined. Otherwise you cannot be sure that processes which have
put items on the queue will terminate.  Remember also that non-daemonic
processes will be joined automatically.
An example which will deadlock is the following::
from multiprocessing import Process, Queue
def f(q):
q.put('X' * 1000000)
if __name__ == '__main__':
queue = Queue()
p = Process(target=f, args=(queue,))
p.start()
p.join() # this deadlocks
obj = queue.get()
A fix here would be to swap the last two lines round (or simply remove the
``p.join()`` line).
Explicitly pass resources to child processes
On Unix a child process can make use of a shared resource created in a
parent process using a global resource. However, it is better to pass the
object as an argument to the constructor for the child process.
Apart from making the code (potentially) compatible with Windows this also
ensures that as long as the child process is still alive the object will not
be garbage collected in the parent process. This might be important if some
resource is freed when the object is garbage collected in the parent
process.
So for instance ::
from multiprocessing import Process, Lock
def f():
... do something using "lock" ...
if __name__ == '__main__':
lock = Lock()
for i in range(10):
Process(target=f).start()
should be rewritten as ::
from multiprocessing import Process, Lock
def f(l):
... do something using "l" ...
if __name__ == '__main__':
lock = Lock()
for i in range(10):
Process(target=f, args=(lock,)).start()
Windows
~~~~~~~
Since Windows lacks :func:`os.fork` it has a few extra restrictions:
More picklability
Ensure that all arguments to :meth:`Process.__init__` are picklable. This
means, in particular, that bound or unbound methods cannot be used directly
as the ``target`` argument on Windows --- just define a function and use
that instead.
Also, if you subclass :class:`Process` then make sure that instances will be
picklable when the :meth:`Process.start` method is called.
Global variables
Bear in mind that if code run in a child process tries to access a global
variable, then the value it sees (if any) may not be the same as the value
in the parent process at the time that :meth:`Process.start` was called.
However, global variables which are just module level constants cause no
problems.
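An illustrative sketch of the pitfall (a hypothetical module)::

    from multiprocessing import Process

    value = 0                      # module level global

    def show():
        # on Windows the child re-imports this module, so it sees the value
        # assigned at import time (0) rather than the parent's later value
        print value

    if __name__ == '__main__':
        value = 99                 # not seen by the child on Windows
        p = Process(target=show)
        p.start()
        p.join()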
Safe importing of main module
Make sure that the main module can be safely imported by a new Python
interpreter without causing unintended side effects (such as starting a new
process).
For example, under Windows running the following module would fail with a
:exc:`RuntimeError`::
from multiprocessing import Process
def foo():
print 'hello'
p = Process(target=foo)
p.start()
Instead one should protect the "entry point" of the program by using ``if
__name__ == '__main__':`` as follows::
from multiprocessing import Process, freeze_support
def foo():
print 'hello'
if __name__ == '__main__':
freeze_support()
p = Process(target=foo)
p.start()
(The :func:`freeze_support()` line can be omitted if the program will be run
normally instead of frozen.)
This allows the newly spawned Python interpreter to safely import the module
and then run the module's ``foo()`` function.
Similar restrictions apply if a pool or manager is created in the main
module.
.. _multiprocessing-examples:
Examples
--------
Demonstration of how to create and use customized managers and proxies:
.. literalinclude:: ../includes/mp_newtype.py
Using :class:`Pool`:
.. literalinclude:: ../includes/mp_pool.py
Synchronization types like locks, conditions and queues:
.. literalinclude:: ../includes/mp_synchronize.py
An example showing how to use queues to feed tasks to a collection of worker
processes and collect the results:
.. literalinclude:: ../includes/mp_workers.py
An example of how a pool of worker processes can each run a
:class:`SimpleHTTPServer.HttpServer` instance while sharing a single listening
socket.
.. literalinclude:: ../includes/mp_webserver.py
Some simple benchmarks comparing :mod:`multiprocessing` with :mod:`threading`:
.. literalinclude:: ../includes/mp_benchmarks.py
An example/demo of how to use the :class:`managers.SyncManager`, :class:`Process`
and others to build a system which can distribute processes and work via a
distributed queue to a "cluster" of machines on a network, accessible via SSH.
You will need to have private key authentication for all hosts configured for
this to work.
.. literalinclude:: ../includes/mp_distributing.py
\ No newline at end of file
@@ -18,6 +18,7 @@ some other systems as well (e.g. Windows or NT). Here's an overview:
threading.rst
dummy_thread.rst
dummy_threading.rst
multiprocessing.rst
mmap.rst
readline.rst
rlcompleter.rst
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray'
]
__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
import _multiprocessing
from multiprocessing.process import Process, current_process, active_children
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocessing.managers import SyncManager
m = SyncManager()
m.start()
return m
def Pipe(duplex=True):
'''
Returns two connection objects connected by a pipe
'''
from multiprocessing.connection import Pipe
return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif sys.platform == 'darwin':
try:
num = int(os.popen('sysctl -n hw.ncpu').read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocessing.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocessing.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes
    '''
    # importing the module registers the pickling support as a side effect
    from multiprocessing import reduction
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocessing.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocessing.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocessing.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocessing.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocessing.synchronize import Event
return Event()
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
Returns a joinable queue object
'''
from multiprocessing.queues import JoinableQueue
return JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=()):
'''
Returns a process pool object
'''
from multiprocessing.pool import Pool
return Pool(processes, initializer, initargs)
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocessing.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocessing.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
'''
Returns a synchronized shared object
'''
from multiprocessing.sharedctypes import Value
return Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Returns a synchronized shared array
'''
from multiprocessing.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
import os
import sys
import socket
import time
import tempfile
import itertools
import _multiprocessing
from multiprocessing import current_process
from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
from multiprocessing.forking import duplicate, close
#
#
#
BUFSIZE = 8192
_mmap_counter = itertools.count()
default_family = 'AF_INET'
families = ['AF_INET']
if hasattr(socket, 'AF_UNIX'):
default_family = 'AF_UNIX'
families += ['AF_UNIX']
if sys.platform == 'win32':
default_family = 'AF_PIPE'
families += ['AF_PIPE']
#
#
#
def arbitrary_address(family):
'''
Return an arbitrary free address for the given family
'''
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
(os.getpid(), _mmap_counter.next()))
else:
raise ValueError('unrecognized family')
def address_type(address):
'''
Return the type of the address
This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
'''
if type(address) == tuple:
return 'AF_INET'
elif type(address) is str and address.startswith('\\\\'):
return 'AF_PIPE'
elif type(address) is str:
return 'AF_UNIX'
else:
raise ValueError('address type of %r unrecognized' % address)
#
# Public functions
#
class Listener(object):
'''
Returns a listener object.
This is a wrapper for a bound socket which is 'listening' for
connections, or for a Windows named pipe.
'''
def __init__(self, address=None, family=None, backlog=1, authkey=None):
family = family or (address and address_type(address)) \
or default_family
address = address or arbitrary_address(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
self._listener = SocketListener(address, family, backlog)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError, 'authkey should be a byte string'
self._authkey = authkey
def accept(self):
'''
Accept a connection on the bound socket or named pipe of `self`.
Returns a `Connection` object.
'''
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
answer_challenge(c, self._authkey)
return c
def close(self):
'''
Close the bound socket or named pipe of `self`.
'''
return self._listener.close()
address = property(lambda self: self._listener._address)
last_accepted = property(lambda self: self._listener._last_accepted)
def Client(address, family=None, authkey=None):
'''
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
c = SocketClient(address)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError, 'authkey should be a byte string'
if authkey is not None:
answer_challenge(c, authkey)
deliver_challenge(c, authkey)
return c
if sys.platform != 'win32':
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
if duplex:
s1, s2 = socket.socketpair()
c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
s1.close()
s2.close()
else:
fd1, fd2 = os.pipe()
c1 = _multiprocessing.Connection(fd1, writable=False)
c2 = _multiprocessing.Connection(fd2, readable=False)
return c1, c2
else:
from ._multiprocessing import win32
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
address = arbitrary_address('AF_PIPE')
if duplex:
openmode = win32.PIPE_ACCESS_DUPLEX
access = win32.GENERIC_READ | win32.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = win32.PIPE_ACCESS_INBOUND
access = win32.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE
h1 = win32.CreateNamedPipe(
address, openmode,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
h2 = win32.CreateFile(
address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
)
win32.SetNamedPipeHandleState(
h2, win32.PIPE_READMODE_MESSAGE, None, None
)
try:
win32.ConnectNamedPipe(h1, win32.NULL)
except WindowsError, e:
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
raise
c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
c2 = _multiprocessing.PipeConnection(h2, readable=duplex)
return c1, c2
#
# Definitions for connections based on sockets
#
class SocketListener(object):
'''
Representation of a socket which is bound to an address and listening
'''
def __init__(self, address, family, backlog=1):
self._socket = socket.socket(getattr(socket, family))
self._socket.bind(address)
self._socket.listen(backlog)
address = self._socket.getsockname()
if type(address) is tuple:
address = (socket.getfqdn(address[0]),) + address[1:]
self._address = address
self._family = family
self._last_accepted = None
sub_debug('listener bound to address %r', self._address)
if family == 'AF_UNIX':
self._unlink = Finalize(
self, os.unlink, args=(self._address,), exitpriority=0
)
else:
self._unlink = None
def accept(self):
s, self._last_accepted = self._socket.accept()
fd = duplicate(s.fileno())
conn = _multiprocessing.Connection(fd)
s.close()
return conn
def close(self):
self._socket.close()
if self._unlink is not None:
self._unlink()
def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
family = address_type(address)
s = socket.socket( getattr(socket, family) )
while 1:
try:
s.connect(address)
except socket.error, e:
if e.args[0] != 10061: # 10061 => connection refused
debug('failed to connect to address %s', address)
raise
time.sleep(0.01)
else:
break
else:
raise
fd = duplicate(s.fileno())
conn = _multiprocessing.Connection(fd)
s.close()
return conn
#
# Definitions for connections based on named pipes
#
if sys.platform == 'win32':
class PipeListener(object):
'''
Representation of a named pipe
'''
def __init__(self, address, backlog=None):
self._address = address
handle = win32.CreateNamedPipe(
address, win32.PIPE_ACCESS_DUPLEX,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
self._handle_queue = [handle]
self._last_accepted = None
sub_debug('listener created with address=%r', self._address)
self.close = Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
def accept(self):
newhandle = win32.CreateNamedPipe(
self._address, win32.PIPE_ACCESS_DUPLEX,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
self._handle_queue.append(newhandle)
handle = self._handle_queue.pop(0)
try:
win32.ConnectNamedPipe(handle, win32.NULL)
except WindowsError, e:
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
raise
return _multiprocessing.PipeConnection(handle)
@staticmethod
def _finalize_pipe_listener(queue, address):
sub_debug('closing listener with address=%r', address)
for handle in queue:
close(handle)
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
'''
while 1:
try:
win32.WaitNamedPipe(address, 1000)
h = win32.CreateFile(
address, win32.GENERIC_READ | win32.GENERIC_WRITE,
0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
)
except WindowsError, e:
if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,
win32.ERROR_PIPE_BUSY):
raise
else:
break
else:
raise
win32.SetNamedPipeHandleState(
h, win32.PIPE_READMODE_MESSAGE, None, None
)
return _multiprocessing.PipeConnection(h)
#
# Authentication stuff
#
MESSAGE_LENGTH = 20
CHALLENGE = '#CHALLENGE#'
WELCOME = '#WELCOME#'
FAILURE = '#FAILURE#'
if sys.version_info >= (3, 0): # XXX can use bytes literals in 2.6/3.0
CHALLENGE = CHALLENGE.encode('ascii')
WELCOME = WELCOME.encode('ascii')
FAILURE = FAILURE.encode('ascii')
def deliver_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
digest = hmac.new(authkey, message).digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
else:
connection.send_bytes(FAILURE)
raise AuthenticationError('digest received was wrong')
def answer_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
digest = hmac.new(authkey, message).digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
raise AuthenticationError('digest sent was rejected')
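#
# An illustrative sketch (not part of the module): the challenge/response
# handshake run over a fresh pipe, with the challenging end driven from a
# helper thread.  Both ends must agree on `authkey`; otherwise both sides
# raise AuthenticationError.
#
def _example_handshake(authkey='secret key'):
    import multiprocessing, threading
    server, client = multiprocessing.Pipe()
    t = threading.Thread(target=deliver_challenge, args=(server, authkey))
    t.start()
    answer_challenge(client, authkey)   # raises if the key does not match
    t.join()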
#
# Support for using xmlrpclib for serialization
#
class ConnectionWrapper(object):
def __init__(self, conn, dumps, loads):
self._conn = conn
self._dumps = dumps
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
def recv(self):
s = self._conn.recv_bytes()
return self._loads(s)
def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')
def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf8'))
return obj
class XmlListener(Listener):
def accept(self):
global xmlrpclib
import xmlrpclib
obj = Listener.accept(self)
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
def XmlClient(*args, **kwds):
global xmlrpclib
import xmlrpclib
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
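#
# An illustrative sketch (not part of the module): XmlListener/XmlClient
# behave like Listener/Client but serialize with xmlrpclib instead of
# pickle, so only xmlrpclib-marshallable objects can be sent.
#
def _example_xml_roundtrip():
    listener = XmlListener(('localhost', 0))
    client = XmlClient(listener.address)
    server = listener.accept()
    server.send(['hello', 1])
    print client.recv()                 # expected to print ['hello', 1]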
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
from multiprocessing import TimeoutError, cpu_count
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from Queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
self._parent._children[self] = None
threading.Thread.start(self)
def get_exitcode(self):
if self._start_called and not self.isAlive():
return 0
else:
return None
# XXX
if sys.version_info < (3, 0):
is_alive = threading.Thread.isAlive.im_func
get_name = threading.Thread.getName.im_func
set_name = threading.Thread.setName.im_func
is_daemon = threading.Thread.isDaemon.im_func
set_daemon = threading.Thread.setDaemon.im_func
else:
is_alive = threading.Thread.isAlive
get_name = threading.Thread.getName
set_name = threading.Thread.setName
is_daemon = threading.Thread.isDaemon
set_daemon = threading.Thread.setDaemon
#
#
#
class Condition(threading._Condition):
# XXX
if sys.version_info < (3, 0):
notify_all = threading._Condition.notifyAll.im_func
else:
notify_all = threading._Condition.notifyAll
#
#
#
Process = DummyProcess
current_process = threading.currentThread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.isAlive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
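#
# An illustrative sketch (not part of the module): multiprocessing.dummy
# exposes the multiprocessing API but runs everything in threads, which is
# convenient for I/O bound work and for debugging without spawning
# processes.
#
def _example_dummy_pool():
    from multiprocessing.dummy import Pool
    pool = Pool(4)
    print pool.map(len, ['a', 'bb', 'ccc'])    # expected to print [1, 2, 3]
    pool.close()
    pool.join()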
#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from Queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
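#
# An illustrative sketch (not part of the module): the queue-backed Pipe()
# returns two Connection objects whose send()/recv() simply put and get on
# a crossed pair of queues, so it only connects threads within one process.
#
def _example_dummy_pipe():
    a, b = Pipe()
    a.send('ping')
    print b.recv()                      # expected to print 'ping'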
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import os
import sys
import signal
from multiprocessing import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(self).__name__
)
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
close = os.close
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
pid, sts = os.waitpid(self.pid, flag)
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError, e:
if self.wait(timeout=0.1) is None:
raise
@staticmethod
def thread_is_spawning():
return False
#
# Windows
#
else:
import thread
import msvcrt
import _subprocess
import copy_reg
import time
from ._multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
try:
from cPickle import dump, load, HIGHEST_PROTOCOL
except ImportError:
from pickle import dump, load, HIGHEST_PROTOCOL
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
exit = win32.ExitProcess
close = win32.CloseHandle
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if sys.executable.lower().endswith('pythonservice.exe'):
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
#
#
#
def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = thread._local()
def __init__(self, process_obj):
# create pipe for communication with child
rfd, wfd = os.pipe()
# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
ht.Close()
close(rhandle)
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
#
#
#
def is_forking(argv):
'''
        Return whether the command line indicates that we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False
def freeze_support():
'''
        Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
main()
sys.exit()
def get_command_line():
'''
        Return the prefix of the command line used for spawning a child process
'''
if process.current_process()._identity==() and is_forking(sys.argv):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that you are on Windows and you have
forgotten to use the proper idiom in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.''')
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.forking import main; main()'
return [_python_exe, '-c', prog, '--multiprocessing-fork']
def main():
'''
        Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
from .util import _logger, _log_to_stderr
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().get_authkey(),
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if not os.path.isabs(main_path) and \
process.ORIGINAL_DIR is not None:
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)
return d
#
# Make (Pipe)Connection picklable
#
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError(
'By default %s objects can only be shared between processes\n'
'using inheritance' % type(conn).__name__
)
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable)
copy_reg.pickle(Connection, reduce_connection)
copy_reg.pickle(PipeConnection, reduce_connection)
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().set_name(data['name'])
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name != 'ipython':
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
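#
# An illustrative sketch (not part of the module) of the main-module idiom
# which the RuntimeError raised in get_command_line() asks for: on Windows
# the child process re-imports the parent's main module, so anything that
# starts new processes must be guarded.
#
#     from multiprocessing import Process, freeze_support
#
#     def f():
#         print 'hello'
#
#     if __name__ == '__main__':
#         freeze_support()        # needed only for frozen executables
#         Process(target=f).start()
#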
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools
import _multiprocessing
from multiprocessing.util import Finalize, info
from multiprocessing.forking import assert_spawning
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
from ._multiprocessing import win32
class Arena(object):
_counter = itertools.count()
def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)
def __getstate__(self):
assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
class Arena(object):
def __init__(self, size):
self.buffer = mmap.mmap(-1, size)
self.size = size
self.name = None
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
_alignment = 8
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
@staticmethod
def _roundup(n, alignment):
# alignment must be a power of 2
mask = alignment - 1
return (n + mask) & ~mask
def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
# free location and try to merge with neighbours
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def free(self, block):
# free a block returned by malloc()
assert os.getpid() == self._lastpid
self._lock.acquire()
try:
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
        # return a block of the right size (possibly rounded up)
assert 0 <= size < sys.maxint
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
try:
size = self._roundup(max(size,1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited
#
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
assert 0 <= size < sys.maxint
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))
def get_address(self):
(arena, start, stop), size = self._state
address, length = _multiprocessing.address_of_buffer(arena.buffer)
assert size <= length
return address + start
def get_size(self):
return self._state[1]
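#
# An illustrative sketch (not part of the module): malloc() hands out
# (arena, start, stop) blocks whose size is rounded up to the heap's
# 8 byte alignment, and free() returns them so that adjacent free blocks
# get merged again.
#
def _example_heap():
    heap = Heap()
    block = heap.malloc(100)            # size is rounded up to 104
    arena, start, stop = block
    assert stop - start == Heap._roundup(100, Heap._alignment)
    heap.free(block)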
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import copy_reg
import Queue
from traceback import format_exc
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning
from multiprocessing.util import Finalize, info
try:
from cPickle import PicklingError
except ImportError:
from pickle import PicklingError
#
#
#
try:
bytes
except NameError:
bytes = str # XXX not needed in Py2.6 and Py3.0
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tostring())
copy_reg.pickle(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # XXX only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
copy_reg.pickle(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
    Send a message to the manager using connection `c` and return the response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if hasattr(func, '__call__'):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=5)
self.address = self.listener.address
self.id_to_obj = {0: (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.setDaemon(True)
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception, e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.currentThread().getName())
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception, e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.currentThread().getName())
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception, e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception, e:
util.info('exception in thread serving %r',
threading.currentThread().getName())
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = self.id_to_obj.keys()
keys.sort()
for ident in keys:
if ident != 0:
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
        Return the number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident=0
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
util.info('manager exiting with exitcode 0')
except:
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = None
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.currentThread().setName(name)
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
try:
self.id_to_refcount[ident] += 1
except TypeError:
assert self.id_to_refcount[ident] is None
self.id_to_refcount[ident] = 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
                util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().get_authkey()
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def __reduce__(self):
return type(self).from_address, \
(self._address, self._authkey, self._serializer)
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.set_name(type(self).__name__ + '-' + ident)
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer):
'''
Create a server, report its address and run it
'''
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in method_to_typeid.items():
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
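#
# An illustrative sketch (the _Example* names are assumed, not part of the
# module): register() gives a BaseManager subclass a factory method which
# returns proxies for objects created in the manager's server process.
#
class _ExampleMaths(object):
    # an assumed demonstration class, not part of the module
    def add(self, x, y):
        return x + y

class _ExampleManager(BaseManager):
    pass

def _example_register():
    _ExampleManager.register('Maths', _ExampleMaths)
    manager = _ExampleManager()
    manager.start()
    maths = manager.Maths()             # an AutoProxy for the real object
    print maths.add(4, 3)               # expected to print 7
    manager.shutdown()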
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().get_authkey()
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().get_name()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.currentThread().getName())
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
return proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception, e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.currentThread().getName())
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception, e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
    If possible the shared object itself is returned; otherwise a proxy for it
    is returned.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec '''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().get_authkey()
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
# XXX remove methods for Py3.0 and Py2.6
_exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def next(self, *args):
return self._callmethod('next', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    # XXX will Condition.notifyAll() name be available in Py3.0?
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notifyAll')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notifyAll')
class EventProxy(BaseProxy):
# XXX will Event.isSet name be available in Py3.0?
_exposed_ = ('isSet', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('isSet')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'terminate'
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', Queue.Queue)
SyncManager.register('JoinableQueue', Queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
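#
# An illustrative sketch (not part of the module): a started SyncManager,
# as returned by multiprocessing.Manager(), serves shared containers and
# synchronization primitives through proxies.
#
def _example_syncmanager():
    import multiprocessing
    manager = multiprocessing.Manager()
    d = manager.dict()
    l = manager.list(range(3))
    d['key'] = 'value'
    l.reverse()
    print d                             # expected to print {'key': 'value'}
    print l                             # expected to print [2, 1, 0]
    manager.shutdown()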
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Pool']
#
# Imports
#
import threading
import Queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return map(*args)
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=()):
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
while 1:
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
put((job, i, result))
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
Process = Process
def __init__(self, processes=None, initializer=None, initargs=()):
self._setup_queues()
self._taskqueue = Queue.Queue()
self._cache = {}
self._state = RUN
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
self._pool = []
for i in range(processes):
w = self.Process(
target=worker,
args=(self._inqueue, self._outqueue, initializer, initargs)
)
self._pool.append(w)
w.set_name(w.get_name().replace('Process', 'PoolWorker'))
w.set_daemon(True)
w.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
)
self._task_handler.setDaemon(True)
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.setDaemon(True)
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._task_handler, self._result_handler, self._cache),
exitpriority=15
)
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1):
'''
        Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
'''
assert self._state == RUN
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
        Like the `imap()` method but the ordering of results is arbitrary
'''
assert self._state == RUN
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
'''
assert self._state == RUN
result = ApplyResult(self._cache, callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None):
'''
Asynchronous equivalent of `map()` builtin
'''
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
thread = threading.currentThread()
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i+1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.currentThread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._taskqueue.put(None)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._terminate()
def join(self):
debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.isAlive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
task_handler._state = TERMINATE
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.isAlive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
p.terminate()
debug('joining task handler')
task_handler.join(1e100)
debug('joining result handler')
result_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
p.join()
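#
# An illustrative sketch (not part of the module): apply_async() returns
# an ApplyResult immediately, while map() blocks until every chunk has
# been processed by the worker processes.
#
def _example_pool():
    import multiprocessing
    pool = multiprocessing.Pool(processes=2)
    result = pool.apply_async(pow, (2, 10))
    print result.get(timeout=1)         # expected to print 1024
    print pool.map(abs, [-1, 2, -3])    # expected to print [1, 2, 3]
    pool.close()
    pool.join()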
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
def __init__(self, cache, callback):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._ready = False
self._callback = callback
cache[self._job] = self
def ready(self):
return self._ready
def successful(self):
assert self._ready
return self._success
def wait(self, timeout=None):
self._cond.acquire()
try:
if not self._ready:
self._cond.wait(timeout)
finally:
self._cond.release()
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._success, self._value = obj
if self._callback and self._success:
self._callback(self._value)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
del self._cache[self._job]
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
self._value = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._ready = True
else:
self._number_left = length//chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
else:
self._success = False
self._value = result
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
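#
# Editorial sketch (not part of the original module): a MapResult
# gathers chunk results into one list and fires the callback exactly
# once, after the final chunk, assuming the usual
# map_async(func, iterable, chunksize, callback) signature defined
# earlier in this module.  `_double` is a hypothetical helper.
#
def _double(x):
    return 2 * x

def _example_map_async():
    import multiprocessing
    done = []
    pool = multiprocessing.Pool(2)
    res = pool.map_async(_double, range(5), callback=done.append)
    print res.get(timeout=10)     # -> [0, 2, 4, 6, 8]
    print done                    # -> [[0, 2, 4, 6, 8]], callback ran once
    pool.close()
    pool.join()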
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
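#
# Editorial sketch (not part of the original module): imap() preserves
# submission order by parking early arrivals in `_unsorted`, while
# imap_unordered() yields results in completion order.  `_square` is a
# hypothetical helper.
#
def _square(x):
    return x * x

def _example_imap():
    import multiprocessing
    pool = multiprocessing.Pool(4)
    print list(pool.imap(_square, range(5)))              # [0, 1, 4, 9, 16]
    print sorted(pool.imap_unordered(_square, range(5)))  # same set, any order
    pool.close()
    pool.join()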
#
#
#
class ThreadPool(Pool):
from .dummy import Process
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notifyAll()
finally:
inqueue.not_empty.release()
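#
# Editorial sketch (not part of the original module): ThreadPool exposes
# the same interface as Pool but backs it with threads and plain
# Queue.Queue objects, so targets need not be picklable.
#
def _example_threadpool():
    pool = ThreadPool(3)
    print pool.map(len, ['a', 'bb', 'ccc'])   # -> [1, 2, 3]
    pool.close()
    pool.join()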
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
try:
bytes
except NameError:
bytes = str # XXX not needed in Py2.6 and Py3.0
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
    The class is analogous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
def get_name(self):
'''
Return name of process
'''
return self._name
def set_name(self, name):
'''
Set name of process
'''
assert isinstance(name, str), 'name must be a string'
self._name = name
def is_daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
def set_daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
def get_authkey(self):
'''
Return authorization key of process
'''
return self._authkey
def set_authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
def get_exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
def get_ident(self):
'''
        Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = property(get_ident)
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.get_exitcode()
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
try:
os.close(sys.stdin.fileno())
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit, e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
sys.stderr.write(e.args[0] + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.get_name())
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
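#
# Editorial sketch (not part of the original module): basic use of the
# class above through the getter/setter API this version provides
# instead of properties.  `_greet` is a hypothetical helper.
#
def _greet(whom):
    print 'hello', whom

def _example_process():
    p = Process(target=_greet, args=('world',))
    p.start()
    p.join()
    print p.get_name(), 'exit code:', p.get_exitcode()    # exit code: 0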
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Queue', 'SimpleQueue']
import sys
import os
import threading
import collections
import time
import atexit
import weakref
from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning
#
# Queue type using a pipe, buffer and thread
#
class Queue(object):
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
assert_spawning(self)
return (self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
def get(self, block=True, timeout=None):
if block and timeout is None:
self._rlock.acquire()
try:
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if not self._poll(block and (deadline-time.time()) or 0.0):
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send,
self._wlock, self._writer.close),
name='QueueFeederThread'
)
self._thread.setDaemon(True)
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
        # Send sentinel to the feeder thread when this queue object
        # is garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
@staticmethod
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
if wacquire is None:
send(obj)
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:
pass
except Exception, e:
# Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
_sentinel = object()
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
class JoinableQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, item, block=True, timeout=None):
Queue.put(self, item, block, timeout)
self._unfinished_tasks.release()
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
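#
# Editorial sketch (not part of the original module): the
# task_done()/join() protocol.  Every item received with get() must be
# balanced by one task_done(), including any sentinel, or join() blocks
# forever.  `_drain` is a hypothetical helper.
#
def _drain(q):
    for item in iter(q.get, None):
        q.task_done()
    q.task_done()                 # balance the get() of the sentinel too

def _example_joinable_queue():
    from multiprocessing import Process
    q = JoinableQueue()
    p = Process(target=_drain, args=(q,))
    p.start()
    for i in range(5):
        q.put(i)
    q.put(None)                   # sentinel
    q.join()                      # returns once all six puts are done
    p.join()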
#
# Simplified Queue type -- really just a locked pipe
#
class SimpleQueue(object):
def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._make_methods()
def empty(self):
return not self._reader.poll()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
self._make_methods()
def _make_methods(self):
recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._wlock is None:
            # writes to a message-oriented win32 pipe are atomic
self.put = self._writer.send
else:
send = self._writer.send
wacquire, wrelease = self._wlock.acquire, self._wlock.release
def put(obj):
wacquire()
try:
return send(obj)
finally:
wrelease()
self.put = put
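#
# Editorial sketch (not part of the original module): SimpleQueue
# supports only put(), get() and empty(); it is the lock-protected pipe
# that Pool uses for its task and result streams.
#
def _example_simple_queue():
    q = SimpleQueue()
    q.put('ping')
    print q.empty()               # -> False
    print q.get()                 # -> 'ping'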
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = []
import os
import sys
import socket
import threading
import copy_reg
import _multiprocessing
from multiprocessing import current_process
from multiprocessing.forking import Popen, duplicate, close
from multiprocessing.util import register_after_fork, debug, sub_debug
from multiprocessing.connection import Client, Listener
#
#
#
if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
raise ImportError('pickling of connections not supported')
#
# Platform specific definitions
#
if sys.platform == 'win32':
import _subprocess
from ._multiprocessing import win32
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid):
_multiprocessing.sendfd(conn.fileno(), handle)
def recv_handle(conn):
return _multiprocessing.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
_cache = set()
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().get_authkey())
t = threading.Thread(target=_serve)
t.setDaemon(True)
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
import traceback
sub_warning(
                'thread for sharing handles raised exception:\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().get_authkey())
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
#
# Register `_multiprocessing.Connection` with `copy_reg`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.Connection(
handle, readable=readable, writable=writable
)
copy_reg.pickle(_multiprocessing.Connection, reduce_connection)
#
# Register `socket.socket` with `copy_reg`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
copy_reg.pickle(socket.socket, reduce_socket)
#
# Register `_multiprocessing.PipeConnection` with `copy_reg`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.PipeConnection(
handle, readable=readable, writable=writable
)
copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)
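#
# Editorial sketch (not part of the original module): the reduce/rebuild
# pair in action, assuming a Unix platform with recvfd support.  This is
# what pickling does implicitly when a Connection crosses a process
# boundary; a same-process round trip is shown purely for illustration.
#
def _example_reduce_connection():
    from multiprocessing import Pipe
    a, b = Pipe()
    rebuild, args = reduce_connection(b)   # duplicates the handle
    b2 = rebuild(*args)                    # a new Connection to the same end
    a.send('hi')
    print b2.recv()                        # -> 'hi'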
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import sys
import ctypes
import weakref
import copy_reg
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int):
type_ = type_ * size_or_initializer
return _new_value(type_)
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawValue(typecode_or_type, *args)
if lock is None:
lock = RLock()
assert hasattr(lock, 'acquire')
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is None:
lock = RLock()
assert hasattr(lock, 'acquire')
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
if sys.platform == 'win32' and type_ not in copy_reg.dispatch_table:
copy_reg.pickle(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec template % ((name,)*7) in d
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
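#
# Editorial sketch (not part of the original module): the wrappers above
# make single loads and stores safe, but a read-modify-write still needs
# get_lock() held across the whole update (the default lock is an RLock,
# so the property's own acquire nests safely).  `_bump` is a
# hypothetical helper.
#
def _bump(v):
    for i in range(1000):
        v.get_lock().acquire()
        try:
            v.value += 1          # increment under the shared lock
        finally:
            v.get_lock().release()

def _example_synchronized_value():
    from multiprocessing import Process
    v = Value('i', 0)
    workers = [Process(target=_bump, args=(v,)) for i in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    print v.value                 # -> 4000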
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
import threading
import os
import sys
from time import time as _time, sleep as _sleep
import _multiprocessing
from multiprocessing.process import current_process
from multiprocessing.util import Finalize, register_after_fork, debug
from multiprocessing.forking import assert_spawning, Popen
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = range(2)
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
def __init__(self, kind, value, maxvalue):
sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
register_after_fork(self, _after_fork)
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
self.__enter__ = self._semlock.__enter__
self.__exit__ = self._semlock.__exit__
def __getstate__(self):
assert_spawning(self)
sl = self._semlock
return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
debug('recreated blocker with handle %r' % state[0])
self._make_methods()
#
# Semaphore
#
class Semaphore(SemLock):
def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
def get_value(self):
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<Semaphore(value=%s)>' % value
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, value)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
(value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
def __init__(self):
SemLock.__init__(self, SEMAPHORE, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = current_process().get_name()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<Lock(owner=%s)>' % name
#
# Recursive lock
#
class RLock(SemLock):
def __init__(self):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = current_process().get_name()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<RLock(%s, %s)>' % (name, count)
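#
# Editorial sketch (not part of the original module): unlike Lock, the
# recursive lock may be acquired repeatedly by its owner and must be
# released as many times; the repr shows the owner and recursion count.
#
def _example_rlock():
    lock = RLock()
    lock.acquire()
    lock.acquire()
    print lock                    # -> <RLock(MainProcess, 2)>
    lock.release()
    lock.release()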
#
# Condition variable
#
class Condition(object):
def __init__(self, lock=None):
self._lock = lock or RLock()
self._sleeping_count = Semaphore(0)
self._woken_count = Semaphore(0)
self._wait_semaphore = Semaphore(0)
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
self.__enter__ = self._lock.__enter__
self.__exit__ = self._lock.__exit__
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
            num_waiters = 'unknown'
return '<Condition(%s, %s)>' % (self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in xrange(count):
self._lock.release()
try:
# wait for notification or timeout
self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for i in xrange(count):
self._lock.acquire()
def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
def notify_all(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for i in xrange(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
#
# Event
#
class Event(object):
def __init__(self):
self._cond = Condition(Lock())
self._flag = Semaphore(0)
def is_set(self):
self._cond.acquire()
try:
if self._flag.acquire(False):
self._flag.release()
return True
return False
finally:
self._cond.release()
def set(self):
self._cond.acquire()
try:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
self._cond.acquire()
try:
self._flag.acquire(False)
finally:
self._cond.release()
def wait(self, timeout=None):
self._cond.acquire()
try:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
finally:
self._cond.release()
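#
# Editorial sketch (not part of the original module): Event mirrors
# threading.Event and is the simplest one-shot signal between processes.
# `_wait_then_report` is a hypothetical helper.
#
def _wait_then_report(ev):
    ev.wait()
    print 'event observed in child'

def _example_event():
    from multiprocessing import Process
    ev = Event()
    p = Process(target=_wait_then_report, args=(ev,))
    p.start()
    ev.set()
    p.join()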
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import itertools
import weakref
import copy_reg
import atexit
import threading        # we want threading to install its
                        # cleanup function before multiprocessing does
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal'
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
if not _logger:
import logging, atexit
        # XXX multiprocessing should clean up before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
_check_logger_class()
_logger = logging.getLogger(LOGGER_NAME)
return _logger
def _check_logger_class():
'''
Make sure process name is recorded when loggers are used
'''
# XXX This function is unnecessary once logging is patched
import logging
if hasattr(logging, 'multiprocessing'):
return
logging._acquireLock()
try:
OldLoggerClass = logging.getLoggerClass()
if not getattr(OldLoggerClass, '_process_aware', False):
class ProcessAwareLogger(OldLoggerClass):
_process_aware = True
def makeRecord(self, *args, **kwds):
record = OldLoggerClass.makeRecord(self, *args, **kwds)
record.processName = current_process()._name
return record
logging.setLoggerClass(ProcessAwareLogger)
finally:
logging._releaseLock()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level is not None:
logger.setLevel(level)
_log_to_stderr = True
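#
# Editorial sketch (not part of the original module): enabling the
# package logger from user code.  Note that in this version
# log_to_stderr() returns None, so fetch the logger separately.
#
def _example_logging():
    import logging
    log_to_stderr(logging.INFO)
    get_logger().info('hello')    # -> [INFO/MainProcess] hello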
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception, e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
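#
# Editorial sketch (not part of the original module): the same
# after-fork pattern the Queue and SemLock classes use for themselves --
# re-initialize per-process state in each forked child.  `_ForkCounter`
# is a hypothetical class.
#
class _ForkCounter(object):
    def __init__(self):
        self.forks = 0
        register_after_fork(self, _ForkCounter._increment)
    def _increment(self):
        self.forks += 1           # runs during bootstrap of every child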
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _finalizer_counter.next())
_finalizer_registry[self._key] = self
def __call__(self, wr=None):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
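#
# Editorial sketch (not part of the original module): a finalizer fires
# as soon as its referent is garbage collected, or at interpreter exit
# if given an exitpriority.  `_Token` and `_report_collected` are
# hypothetical helpers.
#
class _Token(object):
    pass

def _report_collected():
    print 'token collected'

def _example_finalize():
    token = _Token()
    f = Finalize(token, _report_collected)
    print f.still_active()        # -> True
    del token                     # weakref fires; prints 'token collected'
    print f.still_active()        # -> False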
def _run_finalizers(minpriority=None):
'''
    Run all finalizers whose exit priority is not None and at least minpriority.
    Finalizers with the highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function():
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.get_name())
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.get_name())
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(type(Finalize.__init__), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
copy_reg.pickle(type(list.append), _reduce_method_descriptor)
copy_reg.pickle(type(int.__add__), _reduce_method_descriptor)
def _reduce_builtin_function_or_method(m):
return getattr, (m.__self__, m.__name__)
copy_reg.pickle(type(list().append), _reduce_builtin_function_or_method)
copy_reg.pickle(type(int().__add__), _reduce_builtin_function_or_method)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
copy_reg.pickle(partial, _reduce_partial)
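#
# Editorial sketch (not part of the original module): with the
# registrations above, instance methods survive a pickle round trip by
# being rebuilt with getattr().  `_Greeter` is a hypothetical class.
#
class _Greeter(object):
    def hello(self):
        return 'hello'

def _example_pickle_method():
    import pickle
    m = pickle.loads(pickle.dumps(_Greeter().hello))   # bound method round trip
    print m()                                          # -> 'hello'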
#
# Unit tests for the multiprocessing package
#
import unittest
import threading
import Queue
import time
import sys
import os
import gc
import signal
import array
import copy
import socket
import random
import logging
import _multiprocessing
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
#
#
#
if sys.version_info >= (3, 0):
def latin(s):
return s.encode('latin')
else:
latin = str
try:
bytes
except NameError:
bytes = str
def bytearray(seq):
return array.array('c', seq)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.WARNING
DELTA = 0.1
CHECK_TIMINGS = False     # setting this to True makes the tests take a lot
                          # longer and can sometimes cause some non-serious
                          # failures because some calls block a bit longer
                          # than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
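#
# Editorial sketch (not part of the original tests): TimingWrapper in
# isolation.
#
def _example_timing_wrapper():
    wrapped = TimingWrapper(time.sleep)
    wrapped(0.05)
    print wrapped.elapsed >= 0.05     # -> True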
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.get_authkey()
self.assertTrue(current.is_alive())
self.assertTrue(not current.is_daemon())
self.assertTrue(isinstance(authkey, bytes))
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.get_ident(), os.getpid())
self.assertEqual(current.get_exitcode(), None)
def _test(self, q, *args, **kwds):
current = self.current_process()
q.put(args)
q.put(kwds)
q.put(current.get_name())
if self.TYPE != 'threads':
q.put(bytes(current.get_authkey()))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.set_daemon(True)
current = self.current_process()
if self.TYPE != 'threads':
self.assertEquals(p.get_authkey(), current.get_authkey())
self.assertEquals(p.is_alive(), False)
self.assertEquals(p.is_daemon(), True)
self.assertTrue(p not in self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.get_exitcode(), None)
p.start()
self.assertEquals(p.get_exitcode(), None)
self.assertEquals(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEquals(q.get(), args[1:])
self.assertEquals(q.get(), kwargs)
self.assertEquals(q.get(), p.get_name())
if self.TYPE != 'threads':
self.assertEquals(q.get(), current.get_authkey())
self.assertEquals(q.get(), p.pid)
p.join()
self.assertEquals(p.get_exitcode(), 0)
self.assertEquals(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.set_daemon(True)
p.start()
self.assertEqual(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEqual(p.get_exitcode(), None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
p.join()
# XXX sometimes get p.get_exitcode() == 0 on Windows ...
#self.assertEqual(p.get_exitcode(), -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertTrue(p not in self.active_children())
p.start()
self.assertTrue(p in self.active_children())
p.join()
self.assertTrue(p not in self.active_children())
def _test_recursion(self, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.set_daemon(True)
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.set_daemon(True)
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
        # pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
def _test_task_done(self, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
return
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.set_daemon(True)
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.setDaemon(True)
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
        # start some threads/processes which will time out
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.set_daemon(True)
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.setDaemon(True)
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.set_daemon(True)
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.setDaemon(True)
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not work
        # with threading._Event objects (is_set == isSet).
#self.assertEqual(event.is_set(), False)
self.assertEqual(wait(0.0), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
# self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), None)
#
#
#
class _TestValue(BaseTestCase):
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
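    # Each entry is (typecode, initial value, value written by the child
    # in _test() below).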
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if self.TYPE != 'processes':
return
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
if self.TYPE != 'processes':
return
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
class _TestArray(BaseTestCase):
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
def test_array(self, raw=False):
if self.TYPE != 'processes':
return
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
def test_rawarray(self):
self.test_array(raw=True)
def test_getobj_getlock_obj(self):
if self.TYPE != 'processes':
return
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
if refs != EXPECTED_NUMBER:
print self.manager._debugInfo()
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
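# Notes on the registrations above: by default a manager exposes every
# public method of the referent, so proxies for 'Foo' get f() and g() but
# not _h(); 'Bar' overrides this with exposed=('f', '_h'); and 'baz'
# substitutes IteratorProxy, whose __iter__()/next() make the returned
# proxy itself iterable.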
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
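# Registering just the typeid, with no callable, is enough for a manager
# that only connects to an existing server: the client needs the name to
# build a proxy, while the object itself lives in the server process
# started via QueueManager.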
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.set_daemon(True)
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned.  On Windows
        # this sometimes failed with old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.set_daemon(True)
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
try:
    from ctypes import Structure, c_int, c_double
    from multiprocessing.sharedctypes import Value, Array, copy
except ImportError:
Structure = object
c_int = c_double = None
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
if c_int is None:
return
x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = Array('d', range(10), lock=lock)
string = Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
if c_int is None:
return
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function then exit the process
        # without garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
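        # Expected order: higher exitpriority runs first ('d10' at
        # priority 1), equal priorities run in reverse registration
        # order ('d03', 'd02', 'd01'), then 'e' at priority -10.  'c'
        # never appears because finalizers registered without an
        # exitpriority only fire when their referent is garbage
        # collected, and os._exit() skips that.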
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = (
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction', 'multiprocessing.sharedctypes',
'multiprocessing.synchronize', 'multiprocessing.util'
)
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
def _test_level(self, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
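# Plain functions copied out of a module would turn into unbound methods
# when placed in a class namespace under Python 2, so they are wrapped in
# staticmethod() to stay callable without an instance.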
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type[0].upper() + type[1:]
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
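# For example, create_test_cases(ProcessesMixin, type='processes') turns
# _TestCondition into a generated test case named
# WithProcessesTestCondition.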
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
#
#
#
def test_main(run=None):
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__)
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
/*
* Definition of a `Connection` type.
* Used by `socket_connection.c` and `pipe_connection.c`.
*
* connection.h
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#ifndef CONNECTION_H
#define CONNECTION_H
/*
* Read/write flags
*/
#define READABLE 1
#define WRITABLE 2
#define CHECK_READABLE(self) \
if (!(self->flags & READABLE)) { \
PyErr_SetString(PyExc_IOError, "connection is write-only"); \
return NULL; \
}
#define CHECK_WRITABLE(self) \
if (!(self->flags & WRITABLE)) { \
PyErr_SetString(PyExc_IOError, "connection is read-only"); \
return NULL; \
}
/*
* Allocation and deallocation
*/
static PyObject *
connection_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
ConnectionObject *self;
HANDLE handle;
BOOL readable = TRUE, writable = TRUE;
static char *kwlist[] = {"handle", "readable", "writable", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "|ii", kwlist,
&handle, &readable, &writable))
return NULL;
if (handle == INVALID_HANDLE_VALUE || (Py_ssize_t)handle < 0) {
PyErr_Format(PyExc_IOError, "invalid handle %"
PY_FORMAT_SIZE_T "d", (Py_ssize_t)handle);
return NULL;
}
if (!readable && !writable) {
PyErr_SetString(PyExc_ValueError,
"either readable or writable must be true");
return NULL;
}
self = PyObject_New(ConnectionObject, type);
if (self == NULL)
return NULL;
self->weakreflist = NULL;
self->handle = handle;
self->flags = 0;
if (readable)
self->flags |= READABLE;
if (writable)
self->flags |= WRITABLE;
assert(self->flags >= 1 && self->flags <= 3);
return (PyObject*)self;
}
static void
connection_dealloc(ConnectionObject* self)
{
if (self->weakreflist != NULL)
PyObject_ClearWeakRefs((PyObject*)self);
if (self->handle != INVALID_HANDLE_VALUE) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
}
PyObject_Del(self);
}
/*
* Functions for transferring buffers
*/
static PyObject *
connection_sendbytes(ConnectionObject *self, PyObject *args)
{
char *buffer;
Py_ssize_t length, offset=0, size=PY_SSIZE_T_MIN;
int res;
if (!PyArg_ParseTuple(args, F_RBUFFER "#|" F_PY_SSIZE_T F_PY_SSIZE_T,
&buffer, &length, &offset, &size))
return NULL;
CHECK_WRITABLE(self);
if (offset < 0) {
PyErr_SetString(PyExc_ValueError, "offset is negative");
return NULL;
}
if (length < offset) {
PyErr_SetString(PyExc_ValueError, "buffer length < offset");
return NULL;
}
if (size == PY_SSIZE_T_MIN) {
size = length - offset;
} else {
if (size < 0) {
PyErr_SetString(PyExc_ValueError, "size is negative");
return NULL;
}
if (offset + size > length) {
PyErr_SetString(PyExc_ValueError,
"buffer length < offset + size");
return NULL;
}
}
Py_BEGIN_ALLOW_THREADS
res = conn_send_string(self, buffer + offset, size);
Py_END_ALLOW_THREADS
if (res < 0)
return mp_SetError(PyExc_IOError, res);
Py_RETURN_NONE;
}
static PyObject *
connection_recvbytes(ConnectionObject *self, PyObject *args)
{
char *freeme = NULL;
Py_ssize_t res, maxlength = PY_SSIZE_T_MAX;
PyObject *result = NULL;
if (!PyArg_ParseTuple(args, "|" F_PY_SSIZE_T, &maxlength))
return NULL;
CHECK_READABLE(self);
if (maxlength < 0) {
PyErr_SetString(PyExc_ValueError, "maxlength < 0");
return NULL;
}
Py_BEGIN_ALLOW_THREADS
res = conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE,
&freeme, maxlength);
Py_END_ALLOW_THREADS
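    /* After MP_BAD_MESSAGE_LENGTH the read side of the stream is in an
       undefined state, so the connection is downgraded to write-only
       below, or closed outright if it had no write side to keep. */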
if (res < 0) {
if (res == MP_BAD_MESSAGE_LENGTH) {
if ((self->flags & WRITABLE) == 0) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
} else {
self->flags = WRITABLE;
}
}
mp_SetError(PyExc_IOError, res);
} else {
if (freeme == NULL) {
result = PyString_FromStringAndSize(self->buffer, res);
} else {
result = PyString_FromStringAndSize(freeme, res);
PyMem_Free(freeme);
}
}
return result;
}
static PyObject *
connection_recvbytes_into(ConnectionObject *self, PyObject *args)
{
char *freeme = NULL, *buffer = NULL;
Py_ssize_t res, length, offset = 0;
PyObject *result = NULL;
if (!PyArg_ParseTuple(args, "w#|" F_PY_SSIZE_T,
&buffer, &length, &offset))
return NULL;
CHECK_READABLE(self);
if (offset < 0) {
PyErr_SetString(PyExc_ValueError, "negative offset");
return NULL;
}
if (offset > length) {
PyErr_SetString(PyExc_ValueError, "offset too large");
return NULL;
}
Py_BEGIN_ALLOW_THREADS
res = conn_recv_string(self, buffer+offset, length-offset,
&freeme, PY_SSIZE_T_MAX);
Py_END_ALLOW_THREADS
if (res < 0) {
if (res == MP_BAD_MESSAGE_LENGTH) {
if ((self->flags & WRITABLE) == 0) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
} else {
self->flags = WRITABLE;
}
}
mp_SetError(PyExc_IOError, res);
} else {
if (freeme == NULL) {
result = PyInt_FromSsize_t(res);
} else {
result = PyObject_CallFunction(BufferTooShort,
F_RBUFFER "#",
freeme, res);
PyMem_Free(freeme);
if (result) {
PyErr_SetObject(BufferTooShort, result);
Py_DECREF(result);
}
return NULL;
}
}
return result;
}
/*
* Functions for transferring objects
*/
static PyObject *
connection_send_obj(ConnectionObject *self, PyObject *obj)
{
char *buffer;
int res;
Py_ssize_t length;
PyObject *pickled_string = NULL;
CHECK_WRITABLE(self);
pickled_string = PyObject_CallFunctionObjArgs(pickle_dumps, obj,
pickle_protocol, NULL);
if (!pickled_string)
goto failure;
if (PyString_AsStringAndSize(pickled_string, &buffer, &length) < 0)
goto failure;
Py_BEGIN_ALLOW_THREADS
res = conn_send_string(self, buffer, (int)length);
Py_END_ALLOW_THREADS
if (res < 0) {
mp_SetError(PyExc_IOError, res);
goto failure;
}
Py_XDECREF(pickled_string);
Py_RETURN_NONE;
failure:
Py_XDECREF(pickled_string);
return NULL;
}
static PyObject *
connection_recv_obj(ConnectionObject *self)
{
char *freeme = NULL;
Py_ssize_t res;
PyObject *temp = NULL, *result = NULL;
CHECK_READABLE(self);
Py_BEGIN_ALLOW_THREADS
res = conn_recv_string(self, self->buffer, CONNECTION_BUFFER_SIZE,
&freeme, PY_SSIZE_T_MAX);
Py_END_ALLOW_THREADS
if (res < 0) {
if (res == MP_BAD_MESSAGE_LENGTH) {
if ((self->flags & WRITABLE) == 0) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
} else {
self->flags = WRITABLE;
}
}
mp_SetError(PyExc_IOError, res);
} else {
if (freeme == NULL) {
temp = PyString_FromStringAndSize(self->buffer, res);
} else {
temp = PyString_FromStringAndSize(freeme, res);
PyMem_Free(freeme);
}
}
if (temp)
result = PyObject_CallFunctionObjArgs(pickle_loads,
temp, NULL);
Py_XDECREF(temp);
return result;
}
/*
* Other functions
*/
static PyObject *
connection_poll(ConnectionObject *self, PyObject *args)
{
PyObject *timeout_obj = NULL;
double timeout = 0.0;
int res;
CHECK_READABLE(self);
if (!PyArg_ParseTuple(args, "|O", &timeout_obj))
return NULL;
if (timeout_obj == NULL) {
timeout = 0.0;
} else if (timeout_obj == Py_None) {
timeout = -1.0; /* block forever */
} else {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
if (timeout < 0.0)
timeout = 0.0;
}
Py_BEGIN_ALLOW_THREADS
res = conn_poll(self, timeout);
Py_END_ALLOW_THREADS
switch (res) {
case TRUE:
Py_RETURN_TRUE;
case FALSE:
Py_RETURN_FALSE;
default:
return mp_SetError(PyExc_IOError, res);
}
}
static PyObject *
connection_fileno(ConnectionObject* self)
{
if (self->handle == INVALID_HANDLE_VALUE) {
PyErr_SetString(PyExc_IOError, "handle is invalid");
return NULL;
}
return PyInt_FromLong((long)self->handle);
}
static PyObject *
connection_close(ConnectionObject *self)
{
if (self->handle != INVALID_HANDLE_VALUE) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE_VALUE;
}
Py_RETURN_NONE;
}
static PyObject *
connection_repr(ConnectionObject *self)
{
static char *conn_type[] = {"read-only", "write-only", "read-write"};
assert(self->flags >= 1 && self->flags <= 3);
return FROM_FORMAT("<%s %s, handle %" PY_FORMAT_SIZE_T "d>",
conn_type[self->flags - 1],
CONNECTION_NAME, (Py_ssize_t)self->handle);
}
/*
* Getters and setters
*/
static PyObject *
connection_closed(ConnectionObject *self, void *closure)
{
return PyBool_FromLong((long)(self->handle == INVALID_HANDLE_VALUE));
}
static PyObject *
connection_readable(ConnectionObject *self, void *closure)
{
return PyBool_FromLong((long)(self->flags & READABLE));
}
static PyObject *
connection_writable(ConnectionObject *self, void *closure)
{
return PyBool_FromLong((long)(self->flags & WRITABLE));
}
/*
* Tables
*/
static PyMethodDef connection_methods[] = {
{"send_bytes", (PyCFunction)connection_sendbytes, METH_VARARGS,
"send the byte data from a readable buffer-like object"},
{"recv_bytes", (PyCFunction)connection_recvbytes, METH_VARARGS,
"receive byte data as a string"},
{"recv_bytes_into",(PyCFunction)connection_recvbytes_into,METH_VARARGS,
"receive byte data into a writeable buffer-like object\n"
"returns the number of bytes read"},
{"send", (PyCFunction)connection_send_obj, METH_O,
"send a (picklable) object"},
{"recv", (PyCFunction)connection_recv_obj, METH_NOARGS,
"receive a (picklable) object"},
{"poll", (PyCFunction)connection_poll, METH_VARARGS,
"whether there is any input available to be read"},
{"fileno", (PyCFunction)connection_fileno, METH_NOARGS,
"file descriptor or handle of the connection"},
{"close", (PyCFunction)connection_close, METH_NOARGS,
"close the connection"},
{NULL} /* Sentinel */
};
static PyGetSetDef connection_getset[] = {
{"closed", (getter)connection_closed, NULL,
"True if the connection is closed", NULL},
{"readable", (getter)connection_readable, NULL,
"True if the connection is readable", NULL},
{"writable", (getter)connection_writable, NULL,
"True if the connection is writable", NULL},
{NULL}
};
/*
* Connection type
*/
PyDoc_STRVAR(connection_doc,
"Connection type whose constructor signature is\n\n"
" Connection(handle, readable=True, writable=True).\n\n"
"The constructor does *not* duplicate the handle.");
PyTypeObject CONNECTION_TYPE = {
PyVarObject_HEAD_INIT(NULL, 0)
/* tp_name */ "_multiprocessing." CONNECTION_NAME,
/* tp_basicsize */ sizeof(ConnectionObject),
/* tp_itemsize */ 0,
/* tp_dealloc */ (destructor)connection_dealloc,
/* tp_print */ 0,
/* tp_getattr */ 0,
/* tp_setattr */ 0,
/* tp_compare */ 0,
/* tp_repr */ (reprfunc)connection_repr,
/* tp_as_number */ 0,
/* tp_as_sequence */ 0,
/* tp_as_mapping */ 0,
/* tp_hash */ 0,
/* tp_call */ 0,
/* tp_str */ 0,
/* tp_getattro */ 0,
/* tp_setattro */ 0,
/* tp_as_buffer */ 0,
/* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
Py_TPFLAGS_HAVE_WEAKREFS,
/* tp_doc */ connection_doc,
/* tp_traverse */ 0,
/* tp_clear */ 0,
/* tp_richcompare */ 0,
/* tp_weaklistoffset */ offsetof(ConnectionObject, weakreflist),
/* tp_iter */ 0,
/* tp_iternext */ 0,
/* tp_methods */ connection_methods,
/* tp_members */ 0,
/* tp_getset */ connection_getset,
/* tp_base */ 0,
/* tp_dict */ 0,
/* tp_descr_get */ 0,
/* tp_descr_set */ 0,
/* tp_dictoffset */ 0,
/* tp_init */ 0,
/* tp_alloc */ 0,
/* tp_new */ connection_new,
};
#endif /* CONNECTION_H */
/*
 * Extension module used by the multiprocessing package
*
* multiprocessing.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
PyObject *create_win32_namespace(void);
PyObject *pickle_dumps, *pickle_loads, *pickle_protocol;
PyObject *ProcessError, *BufferTooShort;
/*
* Function which raises exceptions based on error codes
*/
PyObject *
mp_SetError(PyObject *Type, int num)
{
switch (num) {
#ifdef MS_WINDOWS
case MP_STANDARD_ERROR:
if (Type == NULL)
Type = PyExc_WindowsError;
PyErr_SetExcFromWindowsErr(Type, 0);
break;
case MP_SOCKET_ERROR:
if (Type == NULL)
Type = PyExc_WindowsError;
PyErr_SetExcFromWindowsErr(Type, WSAGetLastError());
break;
#else /* !MS_WINDOWS */
case MP_STANDARD_ERROR:
case MP_SOCKET_ERROR:
if (Type == NULL)
Type = PyExc_OSError;
PyErr_SetFromErrno(Type);
break;
#endif /* !MS_WINDOWS */
case MP_MEMORY_ERROR:
PyErr_NoMemory();
break;
case MP_END_OF_FILE:
PyErr_SetNone(PyExc_EOFError);
break;
case MP_EARLY_END_OF_FILE:
PyErr_SetString(PyExc_IOError,
"got end of file during message");
break;
case MP_BAD_MESSAGE_LENGTH:
PyErr_SetString(PyExc_IOError, "bad message length");
break;
case MP_EXCEPTION_HAS_BEEN_SET:
break;
default:
PyErr_Format(PyExc_RuntimeError,
"unkown error number %d", num);
}
return NULL;
}
/*
* Windows only
*/
#ifdef MS_WINDOWS
/* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */
HANDLE sigint_event = NULL;
static BOOL WINAPI
ProcessingCtrlHandler(DWORD dwCtrlType)
{
SetEvent(sigint_event);
return FALSE;
}
/*
* Unix only
*/
#else /* !MS_WINDOWS */
#if HAVE_FD_TRANSFER
/* Functions for transferring file descriptors between processes.
Reimplements some of the functionality of the fdcred
module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */
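/* The descriptor is passed as SCM_RIGHTS ancillary data over an AF_UNIX
   socket.  The one-byte iovec is there because, on most platforms,
   sendmsg()/recvmsg() must carry at least one byte of ordinary data for
   the control message to be delivered. */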
static PyObject *
multiprocessing_sendfd(PyObject *self, PyObject *args)
{
int conn, fd, res;
char dummy_char;
char buf[CMSG_SPACE(sizeof(int))];
struct msghdr msg = {0};
struct iovec dummy_iov;
struct cmsghdr *cmsg;
if (!PyArg_ParseTuple(args, "ii", &conn, &fd))
return NULL;
dummy_iov.iov_base = &dummy_char;
dummy_iov.iov_len = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_iov = &dummy_iov;
msg.msg_iovlen = 1;
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
msg.msg_controllen = cmsg->cmsg_len;
*(int*)CMSG_DATA(cmsg) = fd;
Py_BEGIN_ALLOW_THREADS
res = sendmsg(conn, &msg, 0);
Py_END_ALLOW_THREADS
if (res < 0)
return PyErr_SetFromErrno(PyExc_OSError);
Py_RETURN_NONE;
}
static PyObject *
multiprocessing_recvfd(PyObject *self, PyObject *args)
{
int conn, fd, res;
char dummy_char;
char buf[CMSG_SPACE(sizeof(int))];
struct msghdr msg = {0};
struct iovec dummy_iov;
struct cmsghdr *cmsg;
if (!PyArg_ParseTuple(args, "i", &conn))
return NULL;
dummy_iov.iov_base = &dummy_char;
dummy_iov.iov_len = 1;
msg.msg_control = buf;
msg.msg_controllen = sizeof(buf);
msg.msg_iov = &dummy_iov;
msg.msg_iovlen = 1;
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
msg.msg_controllen = cmsg->cmsg_len;
Py_BEGIN_ALLOW_THREADS
res = recvmsg(conn, &msg, 0);
Py_END_ALLOW_THREADS
if (res < 0)
return PyErr_SetFromErrno(PyExc_OSError);
fd = *(int*)CMSG_DATA(cmsg);
return Py_BuildValue("i", fd);
}
#endif /* HAVE_FD_TRANSFER */
#endif /* !MS_WINDOWS */
/*
* All platforms
*/
static PyObject*
multiprocessing_address_of_buffer(PyObject *self, PyObject *obj)
{
void *buffer;
Py_ssize_t buffer_len;
if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0)
return NULL;
return Py_BuildValue("N" F_PY_SSIZE_T,
PyLong_FromVoidPtr(buffer), buffer_len);
}
/*
* Function table
*/
static PyMethodDef module_methods[] = {
{"address_of_buffer", multiprocessing_address_of_buffer, METH_O,
"address_of_buffer(obj) -> int\n"
"Return address of obj assuming obj supports buffer inteface"},
#if HAVE_FD_TRANSFER
{"sendfd", multiprocessing_sendfd, METH_VARARGS,
"sendfd(sockfd, fd) -> None\n"
"Send file descriptor given by fd over the unix domain socket\n"
"whose file decriptor is sockfd"},
{"recvfd", multiprocessing_recvfd, METH_VARARGS,
"recvfd(sockfd) -> fd\n"
"Receive a file descriptor over a unix domain socket\n"
"whose file decriptor is sockfd"},
#endif
{NULL}
};
/*
* Initialize
*/
PyMODINIT_FUNC
init_multiprocessing(void)
{
PyObject *module, *temp;
/* Initialize module */
module = Py_InitModule("_multiprocessing", module_methods);
if (!module)
return;
/* Get copy of objects from pickle */
temp = PyImport_ImportModule(PICKLE_MODULE);
if (!temp)
return;
pickle_dumps = PyObject_GetAttrString(temp, "dumps");
pickle_loads = PyObject_GetAttrString(temp, "loads");
pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");
Py_XDECREF(temp);
/* Get copy of BufferTooShort */
temp = PyImport_ImportModule("multiprocessing");
if (!temp)
return;
BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort");
Py_XDECREF(temp);
/* Add connection type to module */
if (PyType_Ready(&ConnectionType) < 0)
return;
Py_INCREF(&ConnectionType);
PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);
#if defined(MS_WINDOWS) || HAVE_SEM_OPEN
/* Add SemLock type to module */
if (PyType_Ready(&SemLockType) < 0)
return;
Py_INCREF(&SemLockType);
PyDict_SetItemString(SemLockType.tp_dict, "SEM_VALUE_MAX",
Py_BuildValue("i", SEM_VALUE_MAX));
PyModule_AddObject(module, "SemLock", (PyObject*)&SemLockType);
#endif
#ifdef MS_WINDOWS
/* Add PipeConnection to module */
if (PyType_Ready(&PipeConnectionType) < 0)
return;
Py_INCREF(&PipeConnectionType);
PyModule_AddObject(module, "PipeConnection",
(PyObject*)&PipeConnectionType);
/* Initialize win32 class and add to multiprocessing */
temp = create_win32_namespace();
if (!temp)
return;
PyModule_AddObject(module, "win32", temp);
/* Initialize the event handle used to signal Ctrl-C */
sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL);
if (!sigint_event) {
PyErr_SetFromWindowsErr(0);
return;
}
if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {
PyErr_SetFromWindowsErr(0);
return;
}
#endif
/* Add configuration macros */
temp = PyDict_New();
if (!temp)
return;
if (PyModule_AddObject(module, "flags", temp) < 0)
return;
#define ADD_FLAG(name) \
if (PyDict_SetItemString(temp, #name, Py_BuildValue("i", name)) < 0) return
#ifdef HAVE_SEM_OPEN
ADD_FLAG(HAVE_SEM_OPEN);
#endif
#ifdef HAVE_SEM_TIMEDWAIT
ADD_FLAG(HAVE_SEM_TIMEDWAIT);
#endif
#ifdef HAVE_FD_TRANSFER
ADD_FLAG(HAVE_FD_TRANSFER);
#endif
#ifdef HAVE_BROKEN_SEM_GETVALUE
ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE);
#endif
#ifdef HAVE_BROKEN_SEM_UNLINK
ADD_FLAG(HAVE_BROKEN_SEM_UNLINK);
#endif
}
#ifndef MULTIPROCESSING_H
#define MULTIPROCESSING_H
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include "structmember.h"
#include "pythread.h"
/*
* Platform includes and definitions
*/
#ifdef MS_WINDOWS
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# include <winsock2.h>
# include <process.h> /* getpid() */
# define SEM_HANDLE HANDLE
# define SEM_VALUE_MAX LONG_MAX
#else
# include <fcntl.h> /* O_CREAT and O_EXCL */
# include <sys/socket.h>
# include <arpa/inet.h> /* htonl() and ntohl() */
# if HAVE_SEM_OPEN
# include <semaphore.h>
typedef sem_t *SEM_HANDLE;
# endif
# define HANDLE int
# define SOCKET int
# define BOOL int
# define UINT32 uint32_t
# define INT32 int32_t
# define TRUE 1
# define FALSE 0
# define INVALID_HANDLE_VALUE (-1)
#endif
/*
 * Make sure Py_ssize_t is available
*/
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
# define F_PY_SSIZE_T "i"
# define PY_FORMAT_SIZE_T ""
# define PyInt_FromSsize_t(n) PyInt_FromLong((long)n)
#else
# define F_PY_SSIZE_T "n"
#endif
/*
* Format codes
*/
#if SIZEOF_VOID_P == SIZEOF_LONG
# define F_POINTER "k"
# define T_POINTER T_ULONG
#elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG)
# define F_POINTER "K"
# define T_POINTER T_ULONGLONG
#else
# error "can't find format code for unsigned integer of same size as void*"
#endif
#ifdef MS_WINDOWS
# define F_HANDLE F_POINTER
# define T_HANDLE T_POINTER
# define F_SEM_HANDLE F_HANDLE
# define T_SEM_HANDLE T_HANDLE
# define F_DWORD "k"
# define T_DWORD T_ULONG
#else
# define F_HANDLE "i"
# define T_HANDLE T_INT
# define F_SEM_HANDLE F_POINTER
# define T_SEM_HANDLE T_POINTER
#endif
#if PY_VERSION_HEX >= 0x03000000
# define F_RBUFFER "y"
#else
# define F_RBUFFER "s"
#endif
/*
* Error codes which can be returned by functions called without GIL
*/
#define MP_SUCCESS (0)
#define MP_STANDARD_ERROR (-1)
#define MP_MEMORY_ERROR (-1001)
#define MP_END_OF_FILE (-1002)
#define MP_EARLY_END_OF_FILE (-1003)
#define MP_BAD_MESSAGE_LENGTH (-1004)
#define MP_SOCKET_ERROR (-1005)
#define MP_EXCEPTION_HAS_BEEN_SET (-1006)
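/* The codes are negative so they cannot be confused with the
   non-negative byte counts returned on success; MP_STANDARD_ERROR means
   the detail is in errno (or GetLastError()/WSAGetLastError() on
   Windows). */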
PyObject *mp_SetError(PyObject *Type, int num);
/*
* Externs - not all will really exist on all platforms
*/
extern PyObject *pickle_dumps;
extern PyObject *pickle_loads;
extern PyObject *pickle_protocol;
extern PyObject *BufferTooShort;
extern PyTypeObject SemLockType;
extern PyTypeObject ConnectionType;
extern PyTypeObject PipeConnectionType;
extern HANDLE sigint_event;
/*
* Py3k compatibility
*/
#if PY_VERSION_HEX >= 0x03000000
# define PICKLE_MODULE "pickle"
# define FROM_FORMAT PyUnicode_FromFormat
# define PyInt_FromLong PyLong_FromLong
# define PyInt_FromSsize_t PyLong_FromSsize_t
#else
# define PICKLE_MODULE "cPickle"
# define FROM_FORMAT PyString_FromFormat
#endif
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
#endif
#ifndef Py_TPFLAGS_HAVE_WEAKREFS
# define Py_TPFLAGS_HAVE_WEAKREFS 0
#endif
/*
* Connection definition
*/
#define CONNECTION_BUFFER_SIZE 1024
typedef struct {
PyObject_HEAD
HANDLE handle;
int flags;
PyObject *weakreflist;
char buffer[CONNECTION_BUFFER_SIZE];
} ConnectionObject;
/*
* Miscellaneous
*/
#define MAX_MESSAGE_LENGTH 0x7fffffff
#ifndef MIN
# define MIN(x, y) ((x) < (y) ? x : y)
# define MAX(x, y) ((x) > (y) ? x : y)
#endif
#endif /* MULTIPROCESSING_H */
/*
* A type which wraps a pipe handle in message oriented mode
*
* pipe_connection.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
#define CLOSE(h) CloseHandle(h)
/*
 * Send a string to the pipe; assumes the pipe is in message-oriented mode
*/
static Py_ssize_t
conn_send_string(ConnectionObject *conn, char *string, size_t length)
{
DWORD amount_written;
return WriteFile(conn->handle, string, length, &amount_written, NULL)
? MP_SUCCESS : MP_STANDARD_ERROR;
}
/*
 * Attempts to read into buffer, or if the buffer is too small, into
 * *newbuffer.
 *
 * Returns the number of bytes read.  Assumes the pipe is in
 * message-oriented mode.
*/
static Py_ssize_t
conn_recv_string(ConnectionObject *conn, char *buffer,
size_t buflength, char **newbuffer, size_t maxlength)
{
DWORD left, length, full_length, err;
*newbuffer = NULL;
if (ReadFile(conn->handle, buffer, MIN(buflength, maxlength),
&length, NULL))
return length;
err = GetLastError();
if (err != ERROR_MORE_DATA) {
if (err == ERROR_BROKEN_PIPE)
return MP_END_OF_FILE;
return MP_STANDARD_ERROR;
}
if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left))
return MP_STANDARD_ERROR;
full_length = length + left;
if (full_length > maxlength)
return MP_BAD_MESSAGE_LENGTH;
*newbuffer = PyMem_Malloc(full_length);
if (*newbuffer == NULL)
return MP_MEMORY_ERROR;
memcpy(*newbuffer, buffer, length);
if (ReadFile(conn->handle, *newbuffer+length, left, &length, NULL)) {
assert(length == left);
return full_length;
} else {
PyMem_Free(*newbuffer);
return MP_STANDARD_ERROR;
}
}
/*
* Check whether any data is available for reading
*/
#define conn_poll(conn, timeout) conn_poll_save(conn, timeout, _save)
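/* conn_poll() is invoked with the GIL released; the macro threads the
   saved PyThreadState through so conn_poll_save() can briefly reacquire
   the GIL (Py_BLOCK_THREADS) to check for pending signals while
   polling. */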
static int
conn_poll_save(ConnectionObject *conn, double timeout, PyThreadState *_save)
{
DWORD bytes, deadline, delay;
int difference, res;
BOOL block = FALSE;
if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
return MP_STANDARD_ERROR;
if (timeout == 0.0)
return bytes > 0;
if (timeout < 0.0)
block = TRUE;
else
/* XXX does not check for overflow */
deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5);
Sleep(0);
for (delay = 1 ; ; delay += 1) {
if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
return MP_STANDARD_ERROR;
else if (bytes > 0)
return TRUE;
if (!block) {
difference = deadline - GetTickCount();
if (difference < 0)
return FALSE;
if ((int)delay > difference)
delay = difference;
}
if (delay > 20)
delay = 20;
Sleep(delay);
/* check for signals */
Py_BLOCK_THREADS
res = PyErr_CheckSignals();
Py_UNBLOCK_THREADS
if (res)
return MP_EXCEPTION_HAS_BEEN_SET;
}
}
/*
* "connection.h" defines the PipeConnection type using the definitions above
*/
#define CONNECTION_NAME "PipeConnection"
#define CONNECTION_TYPE PipeConnectionType
#include "connection.h"
/*
* A type which wraps a semaphore
*
* semaphore.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
enum { RECURSIVE_MUTEX, SEMAPHORE };
typedef struct {
PyObject_HEAD
SEM_HANDLE handle;
long last_tid;
int count;
int maxvalue;
int kind;
} SemLockObject;
#define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid)
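/* A recursive mutex counts as "mine" if this thread was the last one to
   acquire it and the acquisition count has not dropped back to zero. */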
#ifdef MS_WINDOWS
/*
* Windows definitions
*/
#define SEM_FAILED NULL
#define SEM_CLEAR_ERROR() SetLastError(0)
#define SEM_GET_LAST_ERROR() GetLastError()
#define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL)
#define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1)
#define SEM_GETVALUE(sem, pval) _GetSemaphoreValue(sem, pval)
#define SEM_UNLINK(name) 0
static int
_GetSemaphoreValue(HANDLE handle, long *value)
{
long previous;
switch (WaitForSingleObject(handle, 0)) {
case WAIT_OBJECT_0:
if (!ReleaseSemaphore(handle, 1, &previous))
return MP_STANDARD_ERROR;
*value = previous + 1;
return 0;
case WAIT_TIMEOUT:
*value = 0;
return 0;
default:
return MP_STANDARD_ERROR;
}
}
static PyObject *
semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds)
{
int blocking = 1;
double timeout;
PyObject *timeout_obj = Py_None;
DWORD res, full_msecs, msecs, start, ticks;
static char *kwlist[] = {"block", "timeout", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
&blocking, &timeout_obj))
return NULL;
/* calculate timeout */
if (!blocking) {
full_msecs = 0;
} else if (timeout_obj == Py_None) {
full_msecs = INFINITE;
} else {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
timeout *= 1000.0; /* convert to millisecs */
if (timeout < 0.0) {
timeout = 0.0;
} else if (timeout >= 0.5 * INFINITE) { /* 25 days */
PyErr_SetString(PyExc_OverflowError,
"timeout is too large");
return NULL;
}
full_msecs = (DWORD)(timeout + 0.5);
}
/* check whether we already own the lock */
if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
++self->count;
Py_RETURN_TRUE;
}
/* check whether we can acquire without blocking */
if (WaitForSingleObject(self->handle, 0) == WAIT_OBJECT_0) {
self->last_tid = GetCurrentThreadId();
++self->count;
Py_RETURN_TRUE;
}
msecs = full_msecs;
start = GetTickCount();
for ( ; ; ) {
HANDLE handles[2] = {self->handle, sigint_event};
/* do the wait */
Py_BEGIN_ALLOW_THREADS
ResetEvent(sigint_event);
res = WaitForMultipleObjects(2, handles, FALSE, msecs);
Py_END_ALLOW_THREADS
/* handle result */
if (res != WAIT_OBJECT_0 + 1)
break;
/* got SIGINT so give signal handler a chance to run */
Sleep(1);
/* if this is main thread let KeyboardInterrupt be raised */
if (PyErr_CheckSignals())
return NULL;
/* recalculate timeout */
if (msecs != INFINITE) {
ticks = GetTickCount();
if ((DWORD)(ticks - start) >= full_msecs)
Py_RETURN_FALSE;
msecs = full_msecs - (ticks - start);
}
}
/* handle result */
switch (res) {
case WAIT_TIMEOUT:
Py_RETURN_FALSE;
case WAIT_OBJECT_0:
self->last_tid = GetCurrentThreadId();
++self->count;
Py_RETURN_TRUE;
case WAIT_FAILED:
return PyErr_SetFromWindowsErr(0);
default:
PyErr_Format(PyExc_RuntimeError, "WaitForSingleObject() or "
"WaitForMultipleObjects() gave unrecognized "
"value %d", res);
return NULL;
}
}
static PyObject *
semlock_release(SemLockObject *self, PyObject *args)
{
if (self->kind == RECURSIVE_MUTEX) {
if (!ISMINE(self)) {
PyErr_SetString(PyExc_AssertionError, "attempt to "
"release recursive lock not owned "
"by thread");
return NULL;
}
if (self->count > 1) {
--self->count;
Py_RETURN_NONE;
}
assert(self->count == 1);
}
if (!ReleaseSemaphore(self->handle, 1, NULL)) {
if (GetLastError() == ERROR_TOO_MANY_POSTS) {
PyErr_SetString(PyExc_ValueError, "semaphore or lock "
"released too many times");
return NULL;
} else {
return PyErr_SetFromWindowsErr(0);
}
}
--self->count;
Py_RETURN_NONE;
}
#else /* !MS_WINDOWS */
/*
* Unix definitions
*/
#define SEM_CLEAR_ERROR()
#define SEM_GET_LAST_ERROR() 0
#define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val)
#define SEM_CLOSE(sem) sem_close(sem)
#define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval)
#define SEM_UNLINK(name) sem_unlink(name)
#if HAVE_BROKEN_SEM_UNLINK
# define sem_unlink(name) 0
#endif
#if !HAVE_SEM_TIMEDWAIT
# define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save)
int
sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save)
{
int res;
unsigned long delay, difference;
struct timeval now, tvdeadline, tvdelay;
errno = 0;
tvdeadline.tv_sec = deadline->tv_sec;
tvdeadline.tv_usec = deadline->tv_nsec / 1000;
for (delay = 0 ; ; delay += 1000) {
/* poll */
if (sem_trywait(sem) == 0)
return 0;
else if (errno != EAGAIN)
return MP_STANDARD_ERROR;
/* get current time */
if (gettimeofday(&now, NULL) < 0)
return MP_STANDARD_ERROR;
/* check for timeout */
if (tvdeadline.tv_sec < now.tv_sec ||
(tvdeadline.tv_sec == now.tv_sec &&
tvdeadline.tv_usec <= now.tv_usec)) {
errno = ETIMEDOUT;
return MP_STANDARD_ERROR;
}
/* calculate how much time is left */
difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 +
(tvdeadline.tv_usec - now.tv_usec);
/* check delay not too long -- maximum is 20 msecs */
if (delay > 20000)
delay = 20000;
if (delay > difference)
delay = difference;
/* sleep */
tvdelay.tv_sec = delay / 1000000;
tvdelay.tv_usec = delay % 1000000;
if (select(0, NULL, NULL, NULL, &tvdelay) < 0)
return MP_STANDARD_ERROR;
/* check for signals */
Py_BLOCK_THREADS
res = PyErr_CheckSignals();
Py_UNBLOCK_THREADS
if (res) {
errno = EINTR;
return MP_EXCEPTION_HAS_BEEN_SET;
}
}
}
#endif /* !HAVE_SEM_TIMEDWAIT */
static PyObject *
semlock_acquire(SemLockObject *self, PyObject *args, PyObject *kwds)
{
int blocking = 1, res;
double timeout;
PyObject *timeout_obj = Py_None;
struct timespec deadline = {0};
struct timeval now;
long sec, nsec;
static char *kwlist[] = {"block", "timeout", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
&blocking, &timeout_obj))
return NULL;
if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
++self->count;
Py_RETURN_TRUE;
}
if (timeout_obj != Py_None) {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
if (timeout < 0.0)
timeout = 0.0;
if (gettimeofday(&now, NULL) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
sec = (long) timeout;
nsec = (long) (1e9 * (timeout - sec) + 0.5);
deadline.tv_sec = now.tv_sec + sec;
deadline.tv_nsec = now.tv_usec * 1000 + nsec;
deadline.tv_sec += (deadline.tv_nsec / 1000000000);
deadline.tv_nsec %= 1000000000;
}
do {
Py_BEGIN_ALLOW_THREADS
if (blocking && timeout_obj == Py_None)
res = sem_wait(self->handle);
else if (!blocking)
res = sem_trywait(self->handle);
else
res = sem_timedwait(self->handle, &deadline);
Py_END_ALLOW_THREADS
if (res == MP_EXCEPTION_HAS_BEEN_SET)
break;
} while (res < 0 && errno == EINTR && !PyErr_CheckSignals());
if (res < 0) {
if (errno == EAGAIN || errno == ETIMEDOUT)
Py_RETURN_FALSE;
else if (errno == EINTR)
return NULL;
else
return PyErr_SetFromErrno(PyExc_OSError);
}
++self->count;
self->last_tid = PyThread_get_thread_ident();
Py_RETURN_TRUE;
}
static PyObject *
semlock_release(SemLockObject *self, PyObject *args)
{
if (self->kind == RECURSIVE_MUTEX) {
if (!ISMINE(self)) {
PyErr_SetString(PyExc_AssertionError, "attempt to "
"release recursive lock not owned "
"by thread");
return NULL;
}
if (self->count > 1) {
--self->count;
Py_RETURN_NONE;
}
assert(self->count == 1);
} else {
#if HAVE_BROKEN_SEM_GETVALUE
        /* We can only check the maxvalue == 1 case properly */
if (self->maxvalue == 1) {
            /* make sure that it is already locked */
if (sem_trywait(self->handle) < 0) {
if (errno != EAGAIN) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
/* it is already locked as expected */
} else {
/* it was not locked so undo wait and raise */
if (sem_post(self->handle) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
PyErr_SetString(PyExc_ValueError, "semaphore "
"or lock released too many "
"times");
return NULL;
}
}
#else
int sval;
/* This check is not an absolute guarantee that the semaphore
does not rise above maxvalue. */
if (sem_getvalue(self->handle, &sval) < 0) {
return PyErr_SetFromErrno(PyExc_OSError);
} else if (sval >= self->maxvalue) {
PyErr_SetString(PyExc_ValueError, "semaphore or lock "
"released too many times");
return NULL;
}
#endif
}
if (sem_post(self->handle) < 0)
return PyErr_SetFromErrno(PyExc_OSError);
--self->count;
Py_RETURN_NONE;
}
#endif /* !MS_WINDOWS */
/*
* All platforms
*/
static PyObject *
newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue)
{
SemLockObject *self;
self = PyObject_New(SemLockObject, type);
if (!self)
return NULL;
self->handle = handle;
self->kind = kind;
self->count = 0;
self->last_tid = 0;
self->maxvalue = maxvalue;
return (PyObject*)self;
}
static PyObject *
semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
char buffer[256];
SEM_HANDLE handle = SEM_FAILED;
int kind, maxvalue, value;
PyObject *result;
static char *kwlist[] = {"kind", "value", "maxvalue", NULL};
static int counter = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwlist,
&kind, &value, &maxvalue))
return NULL;
if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) {
PyErr_SetString(PyExc_ValueError, "unrecognized kind");
return NULL;
}
PyOS_snprintf(buffer, sizeof(buffer), "/mp%d-%d", getpid(), counter++);
SEM_CLEAR_ERROR();
handle = SEM_CREATE(buffer, value, maxvalue);
/* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */
if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0)
goto failure;
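/* The name generated above exists only because SEM_CREATE requires
one; unlinking it immediately removes the name from the system
while the handle stays usable until closed, so no named
semaphores are leaked if the process dies uncleanly. */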
if (SEM_UNLINK(buffer) < 0)
goto failure;
result = newsemlockobject(type, handle, kind, maxvalue);
if (!result)
goto failure;
return result;
failure:
if (handle != SEM_FAILED)
SEM_CLOSE(handle);
mp_SetError(NULL, MP_STANDARD_ERROR);
return NULL;
}
static PyObject *
semlock_rebuild(PyTypeObject *type, PyObject *args)
{
SEM_HANDLE handle;
int kind, maxvalue;
if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii",
&handle, &kind, &maxvalue))
return NULL;
return newsemlockobject(type, handle, kind, maxvalue);
}
static void
semlock_dealloc(SemLockObject* self)
{
if (self->handle != SEM_FAILED)
SEM_CLOSE(self->handle);
PyObject_Del(self);
}
static PyObject *
semlock_count(SemLockObject *self)
{
return PyInt_FromLong((long)self->count);
}
static PyObject *
semlock_ismine(SemLockObject *self)
{
/* only makes sense for a lock */
return PyBool_FromLong(ISMINE(self));
}
static PyObject *
semlock_getvalue(SemLockObject *self)
{
#if HAVE_BROKEN_SEM_GETVALUE
PyErr_SetNone(PyExc_NotImplementedError);
return NULL;
#else
int sval;
if (SEM_GETVALUE(self->handle, &sval) < 0)
return mp_SetError(NULL, MP_STANDARD_ERROR);
/* some POSIX implementations use negative numbers to indicate
the number of waiting threads */
if (sval < 0)
sval = 0;
return PyInt_FromLong((long)sval);
#endif
}
static PyObject *
semlock_iszero(SemLockObject *self)
{
#if HAVE_BROKEN_SEM_GETVALUE
if (sem_trywait(self->handle) < 0) {
if (errno == EAGAIN)
Py_RETURN_TRUE;
return mp_SetError(NULL, MP_STANDARD_ERROR);
} else {
if (sem_post(self->handle) < 0)
return mp_SetError(NULL, MP_STANDARD_ERROR);
Py_RETURN_FALSE;
}
#else
int sval;
if (SEM_GETVALUE(self->handle, &sval) < 0)
return mp_SetError(NULL, MP_STANDARD_ERROR);
return PyBool_FromLong((long)sval == 0);
#endif
}
static PyObject *
semlock_afterfork(SemLockObject *self)
{
self->count = 0;
Py_RETURN_NONE;
}
/*
* Semaphore methods
*/
static PyMethodDef semlock_methods[] = {
{"acquire", (PyCFunction)semlock_acquire, METH_VARARGS | METH_KEYWORDS,
"acquire the semaphore/lock"},
{"release", (PyCFunction)semlock_release, METH_NOARGS,
"release the semaphore/lock"},
{"__enter__", (PyCFunction)semlock_acquire, METH_VARARGS,
"enter the semaphore/lock"},
{"__exit__", (PyCFunction)semlock_release, METH_VARARGS,
"exit the semaphore/lock"},
{"_count", (PyCFunction)semlock_count, METH_NOARGS,
"num of `acquire()`s minus num of `release()`s for this process"},
{"_is_mine", (PyCFunction)semlock_ismine, METH_NOARGS,
"whether the lock is owned by this thread"},
{"_get_value", (PyCFunction)semlock_getvalue, METH_NOARGS,
"get the value of the semaphore"},
{"_is_zero", (PyCFunction)semlock_iszero, METH_NOARGS,
"returns whether semaphore has value zero"},
{"_rebuild", (PyCFunction)semlock_rebuild, METH_VARARGS | METH_CLASS,
""},
{"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS,
"rezero the net acquisition count after fork()"},
{NULL}
};
/*
* Member table
*/
static PyMemberDef semlock_members[] = {
{"handle", T_SEM_HANDLE, offsetof(SemLockObject, handle), READONLY,
""},
{"kind", T_INT, offsetof(SemLockObject, kind), READONLY,
""},
{"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY,
""},
{NULL}
};
/*
* Semaphore type
*/
PyTypeObject SemLockType = {
PyVarObject_HEAD_INIT(NULL, 0)
/* tp_name */ "_multiprocessing.SemLock",
/* tp_basicsize */ sizeof(SemLockObject),
/* tp_itemsize */ 0,
/* tp_dealloc */ (destructor)semlock_dealloc,
/* tp_print */ 0,
/* tp_getattr */ 0,
/* tp_setattr */ 0,
/* tp_compare */ 0,
/* tp_repr */ 0,
/* tp_as_number */ 0,
/* tp_as_sequence */ 0,
/* tp_as_mapping */ 0,
/* tp_hash */ 0,
/* tp_call */ 0,
/* tp_str */ 0,
/* tp_getattro */ 0,
/* tp_setattro */ 0,
/* tp_as_buffer */ 0,
/* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
/* tp_doc */ "Semaphore/Mutex type",
/* tp_traverse */ 0,
/* tp_clear */ 0,
/* tp_richcompare */ 0,
/* tp_weaklistoffset */ 0,
/* tp_iter */ 0,
/* tp_iternext */ 0,
/* tp_methods */ semlock_methods,
/* tp_members */ semlock_members,
/* tp_getset */ 0,
/* tp_base */ 0,
/* tp_dict */ 0,
/* tp_descr_get */ 0,
/* tp_descr_set */ 0,
/* tp_dictoffset */ 0,
/* tp_init */ 0,
/* tp_alloc */ 0,
/* tp_new */ semlock_new,
};
/*
* A type which wraps a socket
*
* socket_connection.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
#ifdef MS_WINDOWS
# define WRITE(h, buffer, length) send((SOCKET)h, buffer, length, 0)
# define READ(h, buffer, length) recv((SOCKET)h, buffer, length, 0)
# define CLOSE(h) closesocket((SOCKET)h)
#else
# define WRITE(h, buffer, length) write(h, buffer, length)
# define READ(h, buffer, length) read(h, buffer, length)
# define CLOSE(h) close(h)
#endif
/*
* Send string to file descriptor
*/
static Py_ssize_t
_conn_sendall(HANDLE h, char *string, size_t length)
{
char *p = string;
Py_ssize_t res;
while (length > 0) {
res = WRITE(h, p, length);
if (res < 0)
return MP_SOCKET_ERROR;
length -= res;
p += res;
}
return MP_SUCCESS;
}
/*
* Receive string of exact length from file descriptor
*/
static Py_ssize_t
_conn_recvall(HANDLE h, char *buffer, size_t length)
{
size_t remaining = length;
Py_ssize_t temp;
char *p = buffer;
while (remaining > 0) {
temp = READ(h, p, remaining);
if (temp <= 0) {
if (temp == 0)
return remaining == length ?
MP_END_OF_FILE : MP_EARLY_END_OF_FILE;
else
return temp;
}
remaining -= temp;
p += temp;
}
return MP_SUCCESS;
}
/*
* Send a string prepended by the string length in network byte order
*/
static Py_ssize_t
conn_send_string(ConnectionObject *conn, char *string, size_t length)
{
/* The "header" of the message is a 32 bit unsigned number (in
network order) which specifies the length of the "body". If
the message is shorter than about 16kb then it is quicker to
combine the "header" and the "body" of the message and send
them at once. */
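/* For instance, sending the 3-byte body "abc" puts
00 00 00 03 61 62 63 on the wire: a 4-byte big-endian length
prefix followed by the payload. */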
if (length < (16*1024)) {
char *message;
Py_ssize_t res;
message = PyMem_Malloc(length+4);
if (message == NULL)
return MP_MEMORY_ERROR;
*(UINT32*)message = htonl((UINT32)length);
memcpy(message+4, string, length);
res = _conn_sendall(conn->handle, message, length+4);
PyMem_Free(message);
return res;
} else {
UINT32 lenbuff;
Py_ssize_t res;
if (length > MAX_MESSAGE_LENGTH)
return MP_BAD_MESSAGE_LENGTH;
lenbuff = htonl((UINT32)length);
/* send the header first and propagate its error code unchanged if
it fails -- combining the two sends with || would collapse a
negative error code into 1 -- then send the body */
res = _conn_sendall(conn->handle, (char*)&lenbuff, 4);
if (res < 0)
return res;
return _conn_sendall(conn->handle, string, length);
}
}
/*
* Attempts to read into buffer, or failing that into *newbuffer
*
* Returns number of bytes read.
*/
static Py_ssize_t
conn_recv_string(ConnectionObject *conn, char *buffer,
size_t buflength, char **newbuffer, size_t maxlength)
{
Py_ssize_t res;
UINT32 ulength;
*newbuffer = NULL;
res = _conn_recvall(conn->handle, (char*)&ulength, 4);
if (res < 0)
return res;
ulength = ntohl(ulength);
if (ulength > maxlength)
return MP_BAD_MESSAGE_LENGTH;
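/* Checking the length prefix against maxlength *before* allocating
means a corrupt or malicious peer cannot force an arbitrarily
large PyMem_Malloc below. */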
if (ulength <= buflength) {
res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
return res < 0 ? res : ulength;
} else {
*newbuffer = PyMem_Malloc((size_t)ulength);
if (*newbuffer == NULL)
return MP_MEMORY_ERROR;
res = _conn_recvall(conn->handle, *newbuffer, (size_t)ulength);
return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
}
}
/*
* Check whether any data is available for reading -- a negative timeout blocks
*/
static int
conn_poll(ConnectionObject *conn, double timeout)
{
int res;
fd_set rfds;
FD_ZERO(&rfds);
FD_SET((SOCKET)conn->handle, &rfds);
if (timeout < 0.0) {
res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL);
} else {
struct timeval tv;
tv.tv_sec = (long)timeout;
tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6 + 0.5);
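/* The +0.5 rounds to the nearest microsecond instead of
truncating, e.g. a timeout of 1.5e-6 sec gives tv_usec == 2
rather than 1. */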
res = select((int)conn->handle+1, &rfds, NULL, NULL, &tv);
}
if (res < 0) {
return MP_SOCKET_ERROR;
} else if (FD_ISSET(conn->handle, &rfds)) {
return TRUE;
} else {
assert(res == 0);
return FALSE;
}
}
/*
* "connection.h" defines the Connection type using defs above
*/
#define CONNECTION_NAME "Connection"
#define CONNECTION_TYPE ConnectionType
#include "connection.h"
/*
* Win32 functions used by multiprocessing package
*
* win32_functions.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "multiprocessing.h"
#define WIN32_FUNCTION(func) \
{#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}
#define WIN32_CONSTANT(fmt, con) \
PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))
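/* For example, WIN32_CONSTANT(F_DWORD, INFINITE) becomes a
PyDict_SetItemString() call exposing the C constant as the
attribute _multiprocessing.win32.INFINITE. */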
static PyObject *
win32_CloseHandle(PyObject *self, PyObject *args)
{
HANDLE hObject;
BOOL success;
if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))
return NULL;
Py_BEGIN_ALLOW_THREADS
success = CloseHandle(hObject);
Py_END_ALLOW_THREADS
if (!success)
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyObject *
win32_ConnectNamedPipe(PyObject *self, PyObject *args)
{
HANDLE hNamedPipe;
LPOVERLAPPED lpOverlapped;
BOOL success;
if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER,
&hNamedPipe, &lpOverlapped))
return NULL;
Py_BEGIN_ALLOW_THREADS
success = ConnectNamedPipe(hNamedPipe, lpOverlapped);
Py_END_ALLOW_THREADS
if (!success)
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyObject *
win32_CreateFile(PyObject *self, PyObject *args)
{
LPCTSTR lpFileName;
DWORD dwDesiredAccess;
DWORD dwShareMode;
LPSECURITY_ATTRIBUTES lpSecurityAttributes;
DWORD dwCreationDisposition;
DWORD dwFlagsAndAttributes;
HANDLE hTemplateFile;
HANDLE handle;
if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER
F_DWORD F_DWORD F_HANDLE,
&lpFileName, &dwDesiredAccess, &dwShareMode,
&lpSecurityAttributes, &dwCreationDisposition,
&dwFlagsAndAttributes, &hTemplateFile))
return NULL;
Py_BEGIN_ALLOW_THREADS
handle = CreateFile(lpFileName, dwDesiredAccess,
dwShareMode, lpSecurityAttributes,
dwCreationDisposition,
dwFlagsAndAttributes, hTemplateFile);
Py_END_ALLOW_THREADS
if (handle == INVALID_HANDLE_VALUE)
return PyErr_SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, handle);
}
static PyObject *
win32_CreateNamedPipe(PyObject *self, PyObject *args)
{
LPCTSTR lpName;
DWORD dwOpenMode;
DWORD dwPipeMode;
DWORD nMaxInstances;
DWORD nOutBufferSize;
DWORD nInBufferSize;
DWORD nDefaultTimeOut;
LPSECURITY_ATTRIBUTES lpSecurityAttributes;
HANDLE handle;
if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD
F_DWORD F_DWORD F_DWORD F_POINTER,
&lpName, &dwOpenMode, &dwPipeMode,
&nMaxInstances, &nOutBufferSize,
&nInBufferSize, &nDefaultTimeOut,
&lpSecurityAttributes))
return NULL;
Py_BEGIN_ALLOW_THREADS
handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode,
nMaxInstances, nOutBufferSize,
nInBufferSize, nDefaultTimeOut,
lpSecurityAttributes);
Py_END_ALLOW_THREADS
if (handle == INVALID_HANDLE_VALUE)
return PyErr_SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, handle);
}
static PyObject *
win32_ExitProcess(PyObject *self, PyObject *args)
{
UINT uExitCode;
if (!PyArg_ParseTuple(args, "I", &uExitCode))
return NULL;
ExitProcess(uExitCode);
return NULL;
}
static PyObject *
win32_GetLastError(PyObject *self, PyObject *args)
{
return Py_BuildValue(F_DWORD, GetLastError());
}
static PyObject *
win32_OpenProcess(PyObject *self, PyObject *args)
{
DWORD dwDesiredAccess;
BOOL bInheritHandle;
DWORD dwProcessId;
HANDLE handle;
if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD,
&dwDesiredAccess, &bInheritHandle, &dwProcessId))
return NULL;
handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId);
if (handle == NULL)
return PyErr_SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, handle);
}
static PyObject *
win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)
{
HANDLE hNamedPipe;
PyObject *oArgs[3];
DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL};
int i;
if (!PyArg_ParseTuple(args, F_HANDLE "OOO",
&hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2]))
return NULL;
PyErr_Clear();
for (i = 0 ; i < 3 ; i++) {
if (oArgs[i] != Py_None) {
dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]);
if (PyErr_Occurred())
return NULL;
pArgs[i] = &dwArgs[i];
}
}
if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2]))
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyObject *
win32_WaitNamedPipe(PyObject *self, PyObject *args)
{
LPCTSTR lpNamedPipeName;
DWORD nTimeOut;
BOOL success;
if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut))
return NULL;
Py_BEGIN_ALLOW_THREADS
success = WaitNamedPipe(lpNamedPipeName, nTimeOut);
Py_END_ALLOW_THREADS
if (!success)
return PyErr_SetFromWindowsErr(0);
Py_RETURN_NONE;
}
static PyMethodDef win32_methods[] = {
WIN32_FUNCTION(CloseHandle),
WIN32_FUNCTION(GetLastError),
WIN32_FUNCTION(OpenProcess),
WIN32_FUNCTION(ExitProcess),
WIN32_FUNCTION(ConnectNamedPipe),
WIN32_FUNCTION(CreateFile),
WIN32_FUNCTION(CreateNamedPipe),
WIN32_FUNCTION(SetNamedPipeHandleState),
WIN32_FUNCTION(WaitNamedPipe),
{NULL}
};
PyTypeObject Win32Type = {
PyVarObject_HEAD_INIT(NULL, 0)
};
PyObject *
create_win32_namespace(void)
{
Win32Type.tp_name = "_multiprocessing.win32";
Win32Type.tp_methods = win32_methods;
if (PyType_Ready(&Win32Type) < 0)
return NULL;
Py_INCREF(&Win32Type);
WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
WIN32_CONSTANT(F_DWORD, GENERIC_READ);
WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);
WIN32_CONSTANT(F_DWORD, INFINITE);
WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER);
WIN32_CONSTANT(F_DWORD, OPEN_EXISTING);
WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX);
WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND);
WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE);
WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE);
WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES);
WIN32_CONSTANT(F_DWORD, PIPE_WAIT);
WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS);
WIN32_CONSTANT("i", NULL);
return (PyObject*)&Win32Type;
}
@@ -1236,6 +1236,58 @@ class PyBuildExt(build_ext):
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1,
HAVE_BROKEN_SEM_GETVALUE=1
)
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=0,
HAVE_BROKEN_SEM_UNLINK=1
)
libraries = []
else: # Linux and other unices
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1
)
libraries = ['rt']
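# These flags correspond to the feature tests in the C sources:
# HAVE_SEM_OPEN gates whether semaphore.c is built at all (see
# below), HAVE_SEM_TIMEDWAIT selects sem_timedwait() over the
# select()-based polling fallback, and HAVE_BROKEN_SEM_GETVALUE
# swaps sem_getvalue() for a sem_trywait()/sem_post() probe.
# HAVE_FD_TRANSFER and HAVE_BROKEN_SEM_UNLINK presumably toggle
# file-descriptor passing and a sem_unlink() workaround.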
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if macros.get('HAVE_SEM_OPEN', False):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
# End multiprocessing
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules