Commit aae1b70a authored by Christian Heimes

2to3 run of multiprocessing examples.

mp_benchmarks, mp_newtypes and mp_distribution are still broken but the others are working properly. We should include the examples in our unit test suite ...
parent 94e07729
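
In the hunks below each changed line appears twice: first the original Python 2 form, then the replacement written by 2to3. A minimal sketch of the substitutions that recur throughout the diff; the snippet is illustrative only and uses its own throwaway names rather than code taken from the example files:

    import queue                         # was: import Queue

    q = queue.Queue()
    for i in range(3):                   # was: xrange(3)
        q.put(i * i)
    print('queued', q.qsize(), 'items')  # was: print 'queued', q.qsize(), 'items'

    items = list(range(3))               # 2to3 wraps range() in list() where a real list is needed
    it = iter(items)
    print(next(it))                      # was: it.next()

    try:
        raise ValueError('example')      # was: raise ValueError, 'example'
    except ValueError as exc:
        print('caught', exc)
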
@@ -2,7 +2,7 @@
# Simple benchmarks for the multiprocessing package
#
import time, sys, multiprocessing, threading, Queue, gc
import time, sys, multiprocessing, threading, queue, gc
if sys.platform == 'win32':
_timer = time.clock
@@ -20,7 +20,7 @@ def queuespeed_func(q, c, iterations):
c.notify()
c.release()
for i in xrange(iterations):
for i in range(iterations):
q.put(a)
q.put('STOP')
@@ -48,8 +48,8 @@ def test_queuespeed(Process, q, c):
p.join()
print iterations, 'objects passed through the queue in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_PIPESPEED
@@ -60,7 +60,7 @@ def pipe_func(c, cond, iterations):
cond.notify()
cond.release()
for i in xrange(iterations):
for i in range(iterations):
c.send(a)
c.send('STOP')
@@ -90,8 +90,8 @@ def test_pipespeed():
elapsed = _timer() - t
p.join()
print iterations, 'objects passed through connection in',elapsed,'seconds'
print 'average number/sec:', iterations/elapsed
print(iterations, 'objects passed through connection in',elapsed,'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_SEQSPEED
@@ -105,13 +105,13 @@ def test_seqspeed(seq):
t = _timer()
for i in xrange(iterations):
for i in range(iterations):
a = seq[5]
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
print(iterations, 'iterations in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_LOCK
@@ -125,14 +125,14 @@ def test_lockspeed(l):
t = _timer()
for i in xrange(iterations):
for i in range(iterations):
l.acquire()
l.release()
elapsed = _timer()-t
print iterations, 'iterations in', elapsed, 'seconds'
print 'average number/sec:', iterations/elapsed
print(iterations, 'iterations in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_CONDITION
@@ -141,7 +141,7 @@ def conditionspeed_func(c, N):
c.acquire()
c.notify()
for i in xrange(N):
for i in range(N):
c.wait()
c.notify()
@@ -162,7 +162,7 @@ def test_conditionspeed(Process, c):
t = _timer()
for i in xrange(iterations):
for i in range(iterations):
c.notify()
c.wait()
@@ -171,8 +171,8 @@ def test_conditionspeed(Process, c):
c.release()
p.join()
print iterations * 2, 'waits in', elapsed, 'seconds'
print 'average number/sec:', iterations * 2 / elapsed
print(iterations * 2, 'waits in', elapsed, 'seconds')
print('average number/sec:', iterations * 2 / elapsed)
####
@@ -181,51 +181,51 @@ def test():
gc.disable()
print '\n\t######## testing Queue.Queue\n'
test_queuespeed(threading.Thread, Queue.Queue(),
print('\n\t######## testing Queue.Queue\n')
test_queuespeed(threading.Thread, queue.Queue(),
threading.Condition())
print '\n\t######## testing multiprocessing.Queue\n'
print('\n\t######## testing multiprocessing.Queue\n')
test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
multiprocessing.Condition())
print '\n\t######## testing Queue managed by server process\n'
print('\n\t######## testing Queue managed by server process\n')
test_queuespeed(multiprocessing.Process, manager.Queue(),
manager.Condition())
print '\n\t######## testing multiprocessing.Pipe\n'
print('\n\t######## testing multiprocessing.Pipe\n')
test_pipespeed()
print
print()
print '\n\t######## testing list\n'
test_seqspeed(range(10))
print '\n\t######## testing list managed by server process\n'
test_seqspeed(manager.list(range(10)))
print '\n\t######## testing Array("i", ..., lock=False)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
print '\n\t######## testing Array("i", ..., lock=True)\n'
test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
print('\n\t######## testing list\n')
test_seqspeed(list(range(10)))
print('\n\t######## testing list managed by server process\n')
test_seqspeed(manager.list(list(range(10))))
print('\n\t######## testing Array("i", ..., lock=False)\n')
test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=False))
print('\n\t######## testing Array("i", ..., lock=True)\n')
test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=True))
print
print()
print '\n\t######## testing threading.Lock\n'
print('\n\t######## testing threading.Lock\n')
test_lockspeed(threading.Lock())
print '\n\t######## testing threading.RLock\n'
print('\n\t######## testing threading.RLock\n')
test_lockspeed(threading.RLock())
print '\n\t######## testing multiprocessing.Lock\n'
print('\n\t######## testing multiprocessing.Lock\n')
test_lockspeed(multiprocessing.Lock())
print '\n\t######## testing multiprocessing.RLock\n'
print('\n\t######## testing multiprocessing.RLock\n')
test_lockspeed(multiprocessing.RLock())
print '\n\t######## testing lock managed by server process\n'
print('\n\t######## testing lock managed by server process\n')
test_lockspeed(manager.Lock())
print '\n\t######## testing rlock managed by server process\n'
print('\n\t######## testing rlock managed by server process\n')
test_lockspeed(manager.RLock())
print
print()
print '\n\t######## testing threading.Condition\n'
print('\n\t######## testing threading.Condition\n')
test_conditionspeed(threading.Thread, threading.Condition())
print '\n\t######## testing multiprocessing.Condition\n'
print('\n\t######## testing multiprocessing.Condition\n')
test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
print '\n\t######## testing condition managed by a server process\n'
print('\n\t######## testing condition managed by a server process\n')
test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
@@ -11,15 +11,15 @@ import operator
class Foo(object):
def f(self):
print 'you called Foo.f()'
print('you called Foo.f()')
def g(self):
print 'you called Foo.g()'
print('you called Foo.g()')
def _h(self):
print 'you called Foo._h()'
print('you called Foo._h()')
# A simple generator function
def baz():
for i in xrange(10):
for i in range(10):
yield i*i
# Proxy type for generator objects
@@ -27,7 +27,7 @@ class GeneratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
def __next__(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
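
After this mechanical rename the class defines __next__ twice: the renamed method (still calling _callmethod('next')) is immediately shadowed by the original __next__ below it, which may be part of why mp_newtypes is listed as still broken. A hedged sketch of how the proxy could be collapsed for Python 3, assuming only '__next__' needs to be exposed:

    from multiprocessing.managers import BaseProxy

    class GeneratorProxy(BaseProxy):
        # Sketch only: a single __next__, matching the Python 3 iterator protocol.
        _exposed_ = ('__next__',)
        def __iter__(self):
            return self
        def __next__(self):
            return self._callmethod('__next__')
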
@@ -59,7 +59,7 @@ def test():
manager = MyManager()
manager.start()
print '-' * 20
print('-' * 20)
f1 = manager.Foo1()
f1.f()
@@ -67,7 +67,7 @@ def test():
assert not hasattr(f1, '_h')
assert sorted(f1._exposed_) == sorted(['f', 'g'])
print '-' * 20
print('-' * 20)
f2 = manager.Foo2()
f2.g()
@@ -75,21 +75,21 @@ def test():
assert not hasattr(f2, 'f')
assert sorted(f2._exposed_) == sorted(['g', '_h'])
print '-' * 20
print('-' * 20)
it = manager.baz()
for i in it:
print '<%d>' % i,
print
print('<%d>' % i, end=' ')
print()
print '-' * 20
print('-' * 20)
op = manager.operator()
print 'op.add(23, 45) =', op.add(23, 45)
print 'op.pow(2, 94) =', op.pow(2, 94)
print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6)
print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3)
print 'op._exposed_ =', op._exposed_
print('op.add(23, 45) =', op.add(23, 45))
print('op.pow(2, 94) =', op.pow(2, 94))
print('op.getslice(range(10), 2, 6) =', op.getslice(list(range(10)), 2, 6))
print('op.repeat(range(5), 3) =', op.repeat(list(range(5)), 3))
print('op._exposed_ =', op._exposed_)
##
@@ -43,17 +43,17 @@ def noop(x):
#
def test():
print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
print('cpu_count() = %d\n' % multiprocessing.cpu_count())
#
# Create pool
#
PROCESSES = 4
print 'Creating pool with %d processes\n' % PROCESSES
print('Creating pool with %d processes\n' % PROCESSES)
pool = multiprocessing.Pool(PROCESSES)
print 'pool = %s' % pool
print
print('pool = %s' % pool)
print()
#
# Tests
@@ -66,72 +66,72 @@ def test():
imap_it = pool.imap(calculatestar, TASKS)
imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
print 'Ordered results using pool.apply_async():'
print('Ordered results using pool.apply_async():')
for r in results:
print '\t', r.get()
print
print('\t', r.get())
print()
print 'Ordered results using pool.imap():'
print('Ordered results using pool.imap():')
for x in imap_it:
print '\t', x
print
print('\t', x)
print()
print 'Unordered results using pool.imap_unordered():'
print('Unordered results using pool.imap_unordered():')
for x in imap_unordered_it:
print '\t', x
print
print('\t', x)
print()
print 'Ordered results using pool.map() --- will block till complete:'
print('Ordered results using pool.map() --- will block till complete:')
for x in pool.map(calculatestar, TASKS):
print '\t', x
print
print('\t', x)
print()
#
# Simple benchmarks
#
N = 100000
print 'def pow3(x): return x**3'
print('def pow3(x): return x**3')
t = time.time()
A = map(pow3, xrange(N))
print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t)
A = list(map(pow3, range(N)))
print('\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t))
t = time.time()
B = pool.map(pow3, xrange(N))
print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t)
B = pool.map(pow3, range(N))
print('\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
(N, time.time() - t))
t = time.time()
C = list(pool.imap(pow3, xrange(N), chunksize=N//8))
print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
' seconds' % (N, N//8, time.time() - t)
C = list(pool.imap(pow3, range(N), chunksize=N//8))
print('\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
' seconds' % (N, N//8, time.time() - t))
assert A == B == C, (len(A), len(B), len(C))
print
print()
L = [None] * 1000000
print 'def noop(x): pass'
print 'L = [None] * 1000000'
print('def noop(x): pass')
print('L = [None] * 1000000')
t = time.time()
A = map(noop, L)
print '\tmap(noop, L):\n\t\t%s seconds' % \
(time.time() - t)
A = list(map(noop, L))
print('\tmap(noop, L):\n\t\t%s seconds' % \
(time.time() - t))
t = time.time()
B = pool.map(noop, L)
print '\tpool.map(noop, L):\n\t\t%s seconds' % \
(time.time() - t)
print('\tpool.map(noop, L):\n\t\t%s seconds' % \
(time.time() - t))
t = time.time()
C = list(pool.imap(noop, L, chunksize=len(L)//8))
print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
(len(L)//8, time.time() - t)
print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
(len(L)//8, time.time() - t))
assert A == B == C, (len(A), len(B), len(C))
print
print()
del A, B, C, L
@@ -139,33 +139,33 @@ def test():
# Test error handling
#
print 'Testing error handling:'
print('Testing error handling:')
try:
print pool.apply(f, (5,))
print(pool.apply(f, (5,)))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from pool.apply()'
print('\tGot ZeroDivisionError as expected from pool.apply()')
else:
raise AssertionError, 'expected ZeroDivisionError'
raise AssertionError('expected ZeroDivisionError')
try:
print pool.map(f, range(10))
print(pool.map(f, list(range(10))))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from pool.map()'
print('\tGot ZeroDivisionError as expected from pool.map()')
else:
raise AssertionError, 'expected ZeroDivisionError'
raise AssertionError('expected ZeroDivisionError')
try:
print list(pool.imap(f, range(10)))
print(list(pool.imap(f, list(range(10)))))
except ZeroDivisionError:
print '\tGot ZeroDivisionError as expected from list(pool.imap())'
print('\tGot ZeroDivisionError as expected from list(pool.imap())')
else:
raise AssertionError, 'expected ZeroDivisionError'
raise AssertionError('expected ZeroDivisionError')
it = pool.imap(f, range(10))
it = pool.imap(f, list(range(10)))
for i in range(10):
try:
x = it.next()
x = next(it)
except ZeroDivisionError:
if i == 5:
pass
@@ -173,17 +173,17 @@ def test():
break
else:
if i == 5:
raise AssertionError, 'expected ZeroDivisionError'
raise AssertionError('expected ZeroDivisionError')
assert i == 9
print '\tGot ZeroDivisionError as expected from IMapIterator.next()'
print
print('\tGot ZeroDivisionError as expected from IMapIterator.next()')
print()
#
# Testing timeouts
#
print 'Testing ApplyResult.get() with timeout:',
print('Testing ApplyResult.get() with timeout:', end=' ')
res = pool.apply_async(calculate, TASKS[0])
while 1:
sys.stdout.flush()
@@ -192,10 +192,10 @@ def test():
break
except multiprocessing.TimeoutError:
sys.stdout.write('.')
print
print
print()
print()
print 'Testing IMapIterator.next() with timeout:',
print('Testing IMapIterator.next() with timeout:', end=' ')
it = pool.imap(calculatestar, TASKS)
while 1:
sys.stdout.flush()
@@ -205,14 +205,14 @@ def test():
break
except multiprocessing.TimeoutError:
sys.stdout.write('.')
print
print
print()
print()
#
# Testing callback
#
print 'Testing callback:'
print('Testing callback:')
A = []
B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
@@ -220,13 +220,13 @@ def test():
r = pool.apply_async(mul, (7, 8), callback=A.append)
r.wait()
r = pool.map_async(pow3, range(10), callback=A.extend)
r = pool.map_async(pow3, list(range(10)), callback=A.extend)
r.wait()
if A == B:
print '\tcallbacks succeeded\n'
print('\tcallbacks succeeded\n')
else:
print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)
print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B))
#
# Check there are no outstanding tasks
@@ -238,7 +238,7 @@ def test():
# Check close() methods
#
print 'Testing close():'
print('Testing close():')
for worker in pool._pool:
assert worker.is_alive()
@@ -252,13 +252,13 @@ def test():
for worker in pool._pool:
assert not worker.is_alive()
print '\tclose() succeeded\n'
print('\tclose() succeeded\n')
#
# Check terminate() method
#
print 'Testing terminate():'
print('Testing terminate():')
pool = multiprocessing.Pool(2)
DELTA = 0.1
@@ -270,13 +270,13 @@ def test():
for worker in pool._pool:
assert not worker.is_alive()
print '\tterminate() succeeded\n'
print('\tterminate() succeeded\n')
#
# Check garbage collection
#
print 'Testing garbage collection:'
print('Testing garbage collection:')
pool = multiprocessing.Pool(2)
DELTA = 0.1
@@ -291,7 +291,7 @@ def test():
for worker in processes:
assert not worker.is_alive()
print '\tgarbage collection succeeded\n'
print('\tgarbage collection succeeded\n')
if __name__ == '__main__':
@@ -300,12 +300,12 @@ if __name__ == '__main__':
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
print(' Using processes '.center(79, '-'))
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
print(' Using threads '.center(79, '-'))
import multiprocessing.dummy as multiprocessing
else:
print 'Usage:\n\t%s [processes | threads]' % sys.argv[0]
print('Usage:\n\t%s [processes | threads]' % sys.argv[0])
raise SystemExit(2)
test()
@@ -3,7 +3,7 @@
#
import time, sys, random
from Queue import Empty
from queue import Empty
import multiprocessing # may get overwritten
@@ -15,7 +15,7 @@ def value_func(running, mutex):
time.sleep(random.random()*4)
mutex.acquire()
print '\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished'
print('\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished')
running.value -= 1
mutex.release()
@@ -31,12 +31,12 @@ def test_value():
while running.value > 0:
time.sleep(0.08)
mutex.acquire()
print running.value,
print(running.value, end=' ')
sys.stdout.flush()
mutex.release()
print
print 'No more running processes'
print()
print('No more running processes')
#### TEST_QUEUE
@@ -57,22 +57,22 @@ def test_queue():
while o != 'STOP':
try:
o = q.get(timeout=0.3)
print o,
print(o, end=' ')
sys.stdout.flush()
except Empty:
print 'TIMEOUT'
print('TIMEOUT')
print
print()
#### TEST_CONDITION
def condition_func(cond):
cond.acquire()
print '\t' + str(cond)
print('\t' + str(cond))
time.sleep(2)
print '\tchild is notifying'
print '\t' + str(cond)
print('\tchild is notifying')
print('\t' + str(cond))
cond.notify()
cond.release()
@@ -80,26 +80,26 @@ def test_condition():
cond = multiprocessing.Condition()
p = multiprocessing.Process(target=condition_func, args=(cond,))
print cond
print(cond)
cond.acquire()
print cond
print(cond)
cond.acquire()
print cond
print(cond)
p.start()
print 'main is waiting'
print('main is waiting')
cond.wait()
print 'main has woken up'
print('main has woken up')
print cond
print(cond)
cond.release()
print cond
print(cond)
cond.release()
p.join()
print cond
print(cond)
#### TEST_SEMAPHORE
@@ -109,7 +109,7 @@ def semaphore_func(sema, mutex, running):
mutex.acquire()
running.value += 1
print running.value, 'tasks are running'
print(running.value, 'tasks are running')
mutex.release()
random.seed()
@@ -117,7 +117,7 @@ def semaphore_func(sema, mutex, running):
mutex.acquire()
running.value -= 1
print '%s has finished' % multiprocessing.current_process()
print('%s has finished' % multiprocessing.current_process())
mutex.release()
sema.release()
@@ -143,30 +143,30 @@ def test_semaphore():
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
print '\tchild sleeping'
print('\tchild sleeping')
time.sleep(5.5)
print '\n\tchild terminating'
print('\n\tchild terminating')
def test_join_timeout():
p = multiprocessing.Process(target=join_timeout_func)
p.start()
print 'waiting for process to finish'
print('waiting for process to finish')
while 1:
p.join(timeout=1)
if not p.is_alive():
break
print '.',
print('.', end=' ')
sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
print '\t%r is waiting' % multiprocessing.current_process()
print('\t%r is waiting' % multiprocessing.current_process())
event.wait()
print '\t%r has woken up' % multiprocessing.current_process()
print('\t%r has woken up' % multiprocessing.current_process())
def test_event():
event = multiprocessing.Event()
@@ -177,10 +177,10 @@ def test_event():
for p in processes:
p.start()
print 'main is sleeping'
print('main is sleeping')
time.sleep(2)
print 'main is setting event'
print('main is setting event')
event.set()
for p in processes:
@@ -200,7 +200,7 @@ def sharedvalues_func(values, arrays, shared_values, shared_arrays):
sa = list(shared_arrays[i][:])
assert a == sa
print 'Tests passed'
print('Tests passed')
def test_sharedvalues():
values = [
@@ -209,9 +209,9 @@ def test_sharedvalues():
('d', 1.25)
]
arrays = [
('i', range(100)),
('i', list(range(100))),
('d', [0.25 * i for i in range(100)]),
('H', range(1000))
('H', list(range(1000)))
]
shared_values = [multiprocessing.Value(id, v) for id, v in values]
@@ -238,15 +238,15 @@ def test(namespace=multiprocessing):
test_semaphore, test_join_timeout, test_event,
test_sharedvalues ]:
print '\n\t######## %s\n' % func.__name__
print('\n\t######## %s\n' % func.__name__)
func()
ignore = multiprocessing.active_children() # cleanup any old processes
if hasattr(multiprocessing, '_debug_info'):
info = multiprocessing._debug_info()
if info:
print info
raise ValueError, 'there should be no positive refcounts left'
print(info)
raise ValueError('there should be no positive refcounts left')
if __name__ == '__main__':
@@ -255,19 +255,19 @@ if __name__ == '__main__':
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
print(' Using processes '.center(79, '-'))
namespace = multiprocessing
elif sys.argv[1] == 'manager':
print ' Using processes and a manager '.center(79, '-')
print(' Using processes and a manager '.center(79, '-'))
namespace = multiprocessing.Manager()
namespace.Process = multiprocessing.Process
namespace.current_process = multiprocessing.current_process
namespace.active_children = multiprocessing.active_children
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
print(' Using threads '.center(79, '-'))
import multiprocessing.dummy as namespace
else:
print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
raise SystemExit, 2
print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0])
raise SystemExit(2)
test(namespace)
@@ -13,8 +13,8 @@ import os
import sys
from multiprocessing import Process, current_process, freeze_support
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
if sys.platform == 'win32':
import multiprocessing.reduction # make sockets pickable/inheritable
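
The BaseHTTPServer and SimpleHTTPServer modules are merged into http.server in Python 3, which is all the import change above reflects. A minimal, hedged sketch of the serving setup the example builds on; the helper name is illustrative, while the address matches the example's ('localhost', 8000):

    from http.server import HTTPServer, SimpleHTTPRequestHandler

    def serve(address=('localhost', 8000)):
        # Serve files from the current directory, as each worker process does.
        httpd = HTTPServer(address, SimpleHTTPRequestHandler)
        httpd.serve_forever()
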
@@ -54,9 +54,9 @@ def test():
ADDRESS = ('localhost', 8000)
NUMBER_OF_PROCESSES = 4
print 'Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']
print('Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES))
print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32'])
os.chdir(DIR)
runpool(ADDRESS, NUMBER_OF_PROCESSES)
@@ -65,9 +65,9 @@ def test():
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print 'Unordered results:'
print('Unordered results:')
for i in range(len(TASKS1)):
print '\t', done_queue.get()
print('\t', done_queue.get())
# Add more tasks using `put()`
for task in TASKS2:
@@ -75,7 +75,7 @@ def test():
# Get and print some more results
for i in range(len(TASKS2)):
print '\t', done_queue.get()
print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):