# neo/tests/threaded/__init__.py
#
# Copyright (c) 2011 Nexedi SARL and Contributors. All Rights Reserved.
#                    Julien Muchembled <jm@nexedi.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

19
import os, random, socket, sys, tempfile, threading, time, types, weakref
20
from collections import deque
21
from itertools import count
22
from functools import wraps
23
from zlib import decompress
24 25 26 27 28 29
from mock import Mock
import transaction, ZODB
import neo.admin.app, neo.master.app, neo.storage.app
import neo.client.app, neo.neoctl.app
from neo.client import Storage
from neo.lib import bootstrap, setupLog
30
from neo.lib.connection import BaseConnection, Connection
31 32 33 34 35
from neo.lib.connector import SocketConnector, \
    ConnectorConnectionRefusedException
from neo.lib.event import EventManager
from neo.lib.protocol import CellStates, ClusterStates, NodeStates, NodeTypes
from neo.lib.util import SOCKET_CONNECTORS_DICT, parseMasterList
36
from .. import NeoTestBase, getTempDirectory, setupMySQLdb, \
37
    ADDRESS_TYPE, IP_VERSION_FORMAT_DICT, DB_PREFIX, DB_USER
38 39 40 41 42 43 44

# Address real listening connections are bound to (any free port);
# see the makeListeningConnection patch in NEOCluster._patch.
BIND = IP_VERSION_FORMAT_DICT[ADDRESS_TYPE], 0
# Local IP in packed (binary) form; used as base to derive one virtual IP
# per server class (see ServerNode's metaclass).
LOCAL_IP = socket.inet_pton(ADDRESS_TYPE, IP_VERSION_FORMAT_DICT[ADDRESS_TYPE])


class Serialized(object):
    """Cooperative scheduling of the threads of a NEO test cluster.

    At any time exactly one participating thread runs: it owns a lock,
    and every suspended thread is blocked on its own lock, queued in FIFO
    order in '_lock_list'.  release()/acquire() hand execution over from
    the current thread to the longest-suspended one.
    """

    @classmethod
    def init(cls):
        # Lock of the test (client) thread; created already acquired so
        # that the test thread is the initially running one.
        cls._global_lock = threading.Lock()
        cls._global_lock.acquire()
        # FIFO of locks of suspended threads, protected by '_lock_lock'.
        cls._lock_list = deque()
        cls._lock_lock = threading.Lock()
        # True when a pdb session must be resumed in the next woken thread.
        cls._pdb = False
        # 0 when the cluster is idle, 1 while some node still has network
        # activity to process (see SerializedEventManager._poll), or a
        # frozenset of nodes whose threads must exit (see release/acquire).
        cls.pending = 0

    @classmethod
    def release(cls, lock=None, wake_other=True, stop=None):
        """Suspend lock owner and resume first suspended thread"""
        if lock is None:
            lock = cls._global_lock
            if stop: # XXX: we should fix ClusterStates.STOPPING
                # Mark the given nodes: their threads will exit at the
                # next wakeup (see acquire()).
                cls.pending = frozenset(stop)
            else:
                cls.pending = 0
        try:
            # If the caller is being stepped through with pdb, tell the
            # debugger to continue here so the session can be re-entered
            # in the woken thread (via the '_pdb' flag checked in
            # acquire()).  AttributeError when no tracer is active.
            sys._getframe(1).f_trace.im_self.set_continue()
            cls._pdb = True
        except AttributeError:
            pass
        q = cls._lock_list
        l = cls._lock_lock
        l.acquire()
        try:
            # Queue our own lock, then wake the longest-suspended thread.
            # If the queue was empty, we pop our own lock back and the
            # following acquire() returns immediately.
            q.append(lock)
            if wake_other:
                q.popleft().release()
        finally:
            l.release()

    @classmethod
    def acquire(cls, lock=None):
        """Suspend all threads except lock owner"""
        if lock is None:
            lock = cls._global_lock
        # Block until another thread hands execution over via release().
        lock.acquire()
        if type(cls.pending) is frozenset: # XXX
            if lock is cls._global_lock:
                # The test thread resumed: the stop request is over.
                cls.pending = 0
            elif threading.currentThread() in cls.pending:
                # This node thread was asked to stop: die now.
                sys.exit()
        if cls._pdb:
            # Resume the debugging session in this (just woken) thread.
            cls._pdb = False
            try:
                sys.stdout.write(threading.currentThread().node_name)
            except AttributeError:
                pass
            # NOTE(review): 'pdb' is not imported in this chunk;
            # presumably provided by the enclosing test package — confirm.
            pdb(1)

    @classmethod
    def tic(cls, lock=None):
        # switch to another thread
        # (the following calls are not supposed to be debugged into)
        cls.release(lock); cls.acquire(lock)

    @classmethod
    def background(cls):
        # Wake a suspended thread (if any) without suspending the caller,
        # e.g. when the current thread is about to die or must keep
        # running concurrently.
        with cls._lock_lock:
            if cls._lock_list:
                cls._lock_list.popleft().release()

class SerializedEventManager(EventManager):
    """EventManager whose polling cooperates with the Serialized scheduler.

    Never instantiated directly: decorate() temporarily installs this
    class's __init__ as EventManager.__init__, so any EventManager created
    while the decorated function runs becomes an instance of this class.
    """

    # Scheduling lock of the owning node thread
    # (None means Serialized._global_lock, i.e. the test/client thread).
    _lock = None
    # Timeout substituted in _poll: 0 for server nodes (never block),
    # 1 when the client is in "master" mode (see ClientApplication.setPoll),
    # -1 for neoctl (see NeoCTL.__init__).
    _timeout = 0

    @classmethod
    def decorate(cls, func):
        """Patch EventManager.__init__ while 'func' executes."""
        def decorator(*args, **kw):
            try:
                # Install our __init__ as an unbound method of EventManager.
                EventManager.__init__ = types.MethodType(
                    cls.__init__.im_func, None, EventManager)
                return func(*args, **kw)
            finally:
                # Always restore the original __init__.
                EventManager.__init__ = types.MethodType(
                    cls._super__init__.im_func, None, EventManager)
        return wraps(func)(decorator)

    # Original EventManager.__init__, kept for decorate() and __init__().
    _super__init__ = EventManager.__init__.im_func

    def __init__(self):
        # Runs as EventManager.__init__ (installed by decorate()):
        # rebase the new object on this class, then run the original init.
        cls = self.__class__
        assert cls is EventManager
        self.__class__ = SerializedEventManager
        self._super__init__()

    def _poll(self, timeout=1):
        if self._pending_processing:
            # A packet is already waiting to be processed:
            # polling must not block.
            assert timeout <= 0
        elif 0 == self._timeout == timeout == Serialized.pending == len(
            self.writer_set):
            # Nothing to send, nothing pending anywhere:
            # polling would be a no-op, skip it entirely.
            return
        else:
            if self.writer_set and Serialized.pending == 0:
                # We are about to send something: let the cluster know
                # there is network activity to process.
                Serialized.pending = 1
            # Jump to another thread before polling, so that when a message is
            # sent on the network, one can debug immediately the receiving part.
            # XXX: Unfortunately, this means we have a useless full-cycle
            #      before the first message is sent.
            # TODO: Detect where a message is sent to jump immediately to nodes
            #       that will do something.
            Serialized.tic(self._lock)
            if timeout != 0:
                timeout = self._timeout
                if timeout != 0 and Serialized.pending == 1:
                    # Activity is still expected somewhere: don't block.
                    Serialized.pending = timeout = 0
        EventManager._poll(self, timeout)

class Node(object):
    """Mix-in providing packet filtering towards a given set of peers."""

    def filterConnection(self, *peers):
        """Return a ConnectionFilter over this node's connections whose
        remote address matches one of 'peers'."""
        def remote_addr(conn):
            # Address identifying the remote side of a connection,
            # or a falsy value when it has no connector.
            connector = conn.connector
            return connector and (connector.accepted_from
                                  or connector.getAddress())
        peer_addr_set = set()
        for peer in peers:
            for conn in peer.em.connection_dict.itervalues():
                if isinstance(conn, Connection):
                    peer_addr_set.add(remote_addr(conn))
        peer_addr_set.discard(None)
        return ConnectionFilter(*(conn
            for conn in self.em.connection_dict.itervalues()
            if isinstance(conn, Connection)
                and remote_addr(conn) in peer_addr_set))

class ServerNode(Node):
    """Base class running a NEO server application (admin/master/storage)
    in a dedicated thread, reachable through a virtual address."""

    # virtual IP -> concrete ServerNode subclass (filled by the metaclass).
    _server_class_dict = {}

    class __metaclass__(type):
        def __init__(cls, name, bases, d):
            type.__init__(cls, name, bases, d)
            # For each concrete subclass (e.g. MasterApplication, but not
            # ServerNode itself), mix threading.Thread into the bases once
            # and allocate a class-specific virtual IP.
            if Node not in bases and threading.Thread not in cls.__mro__:
                cls.__bases__ = bases + (threading.Thread,)
                # 'XxxApplication' -> NodeTypes.XXX ('Application' is 11 chars)
                cls.node_type = getattr(NodeTypes, name[:-11].upper())
                cls._node_list = []
                # Derive the virtual IP by patching the last byte of
                # LOCAL_IP (2, 3, ... per registered class).
                cls._virtual_ip = socket.inet_ntop(ADDRESS_TYPE,
                    LOCAL_IP[:-1] + chr(2 + len(cls._server_class_dict)))
                cls._server_class_dict[cls._virtual_ip] = cls

    @classmethod
    def newAddress(cls):
        # Virtual address: (class virtual IP, index in _node_list).
        address = cls._virtual_ip, len(cls._node_list)
        cls._node_list.append(None)
        return address

    @classmethod
    def resolv(cls, address):
        """Translate a virtual address into the real listening address;
        non-virtual addresses are returned unchanged."""
        try:
            cls = cls._server_class_dict[address[0]]
        except KeyError:
            return address
        return cls._node_list[address[1]].getListeningAddress()

    @SerializedEventManager.decorate
    def __init__(self, cluster, address=None, **kw):
        if not address:
            address = self.newAddress()
        port = address[1]
        # Weak proxy so that dropping the node object really releases it.
        self._node_list[port] = weakref.proxy(self)
        # Saved so that resetNode() can re-run __init__ identically.
        self._init_args = (cluster, address), kw.copy()
        threading.Thread.__init__(self)
        self.daemon = True
        self.node_name = '%s_%u' % (self.node_type, port)
        kw.update(getCluster=cluster.name, getBind=address,
                  getMasters=parseMasterList(cluster.master_nodes, address))
        # The application reads its configuration through a Mock object.
        super(ServerNode, self).__init__(Mock(kw))

    def getVirtualAddress(self):
        # (cluster, address) is _init_args[0]; address is item 1.
        return self._init_args[0][1]

    def resetNode(self):
        """Recreate the application in-place, keeping address and UUID."""
        assert not self.isAlive()
        args, kw = self._init_args
        kw['getUUID'] = self.uuid
        self.__dict__.clear()
        self.__init__(*args, **kw)

    def start(self):
        # A starting node implies pending network activity.
        Serialized.pending = 1
        # Give the thread its own scheduling lock, pre-acquired so that
        # run() blocks until the scheduler hands execution over to it.
        self.em._lock = l = threading.Lock()
        l.acquire()
        Serialized.release(l, wake_other=0)
        threading.Thread.start(self)

    def run(self):
        try:
            # Wait for our turn before entering the application main loop.
            Serialized.acquire(self.em._lock)
            super(ServerNode, self).run()
        finally:
            self._afterRun()
            neo.lib.logging.debug('stopping %r', self)
            # Wake another thread without re-queueing: this thread dies.
            Serialized.background()

    def _afterRun(self):
        try:
            self.listening_conn.close()
        except AttributeError:
            # The node never started listening.
            pass

    def getListeningAddress(self):
        try:
            return self.listening_conn.getAddress()
        except AttributeError:
            # Not listening: behave like a refused connection.
            raise ConnectorConnectionRefusedException


class AdminApplication(ServerNode, neo.admin.app.Application):
    # Threaded admin node (Thread is mixed in by ServerNode's metaclass).
    pass

class MasterApplication(ServerNode, neo.master.app.Application):
    # Threaded master node (Thread is mixed in by ServerNode's metaclass).
    pass

class StorageApplication(ServerNode, neo.storage.app.Application):
    """Threaded storage node, with helpers to inspect and tamper with
    its database."""

    def resetNode(self, clear_database=False):
        self._init_args[1]['getReset'] = clear_database
        # Keep the database manager across the reset, unless the
        # database must be recreated from scratch.
        dm = self.dm
        super(StorageApplication, self).resetNode()
        if dm and not clear_database:
            self.dm = dm

    def _afterRun(self):
        super(StorageApplication, self)._afterRun()
        try:
            self.dm.close()
            self.dm = None
        except StandardError: # AttributeError & ProgrammingError
            pass

    def switchTables(self):
        """Swap the committed tables/trees with the temporary ones
        (obj <-> tobj, trans <-> ttrans), for both supported adapters."""
        adapter = self._init_args[1]['getAdapter']
        dm = self.dm
        if adapter == 'BTree':
            dm._obj, dm._tobj = dm._tobj, dm._obj
            dm._trans, dm._ttrans = dm._ttrans, dm._trans
            # Data previously committed is now referenced from the
            # temporary side: rebuild the uncommitted refcounts and
            # clear the committed indexes accordingly.
            uncommitted_data = dm._uncommitted_data
            for checksum, (_, _, index) in dm._data.iteritems():
                uncommitted_data[checksum] = len(index)
                index.clear()
        elif adapter == 'MySQL':
            q = dm.query
            dm.begin()
            for table in ('trans', 'obj'):
                # 3-step rename to exchange <table> and t<table>.
                q('RENAME TABLE %s to tmp' % table)
                q('RENAME TABLE t%s to %s' % (table, table))
                q('RENAME TABLE tmp to t%s' % table)
            dm.commit()
        else:
            assert False

    def getDataLockInfo(self):
        """Return {checksum: uncommitted-refcount} for every data row
        (0 for fully committed data)."""
        adapter = self._init_args[1]['getAdapter']
        dm = self.dm
        if adapter == 'BTree':
            checksum_list = dm._data
        elif adapter == 'MySQL':
            checksum_list = [x for x, in dm.query("SELECT hash FROM data")]
        else:
            assert False
        # Sanity check: uncommitted data must be a subset of stored data.
        assert set(dm._uncommitted_data).issubset(checksum_list)
        return dict((x, dm._uncommitted_data.get(x, 0)) for x in checksum_list)

class ClientApplication(Node, neo.client.app.Application):
    """NEO client application wired into the Serialized scheduler."""

    @SerializedEventManager.decorate
    def __init__(self, cluster):
        super(ClientApplication, self).__init__(
            cluster.master_nodes, cluster.name)
        # Scheduling lock for the client's own polling thread
        # (not acquired here: see setPoll).
        self.em._lock = threading.Lock()

    def setPoll(self, master=False):
        # master=True: polling is driven from the test (master) thread
        # with a 1s timeout; if our lock is already held, wake another
        # suspended thread instead of blocking.
        # master=False: give execution back to the scheduler and poll
        # without blocking from now on.
        if master:
            self.em._timeout = 1
            if not self.em._lock.acquire(0):
                Serialized.background()
        else:
            Serialized.release(wake_other=0); Serialized.acquire()
            self.em._timeout = 0

    def __del__(self):
        try:
            super(ClientApplication, self).__del__()
        finally:
            # Don't leave other threads suspended forever.
            Serialized.background()
    close = __del__

    def filterConnection(self, *peers):
        # Unlike Node.filterConnection, the client can look its
        # connections up directly by peer.
        conn_list = []
        for peer in peers:
            if isinstance(peer, MasterApplication):
                conn = self._getMasterConnection()
            else:
                assert isinstance(peer, StorageApplication)
                conn = self.cp.getConnForNode(self.nm.getByUUID(peer.uuid))
            conn_list.append(conn)
        return ConnectionFilter(*conn_list)

class NeoCTL(neo.neoctl.app.NeoCTL):
    """neoctl client talking to the cluster's admin node."""

    @SerializedEventManager.decorate
    def __init__(self, cluster):
        self._cluster = cluster
        # Connect to the admin node through its virtual address.
        super(NeoCTL, self).__init__(cluster.admin.getVirtualAddress())
        # -1 is passed through SerializedEventManager._poll to
        # EventManager._poll when the cluster is idle — presumably
        # meaning "block until a result is available"; confirm against
        # EventManager._poll.
        self.em._timeout = -1

class LoggerThreadName(str):
    """A string whose effective value is the current thread's 'node_name'.

    Threads without a 'node_name' attribute (e.g. the main thread) see
    the fallback value given at construction time.  Hashing is by
    identity so distinct instances never collide as dict keys even when
    their values compare equal.
    """

    def __new__(cls, default='TEST'):
        # The stored str value is only the fallback.
        return str.__new__(cls, default)

    def __getattribute__(self, attr):
        # Delegate every attribute access to the dynamically-computed
        # value, so the object always behaves like the current name.
        current = str(self)
        return getattr(current, attr)

    def __hash__(self):
        # Identity hash (see class docstring).
        return id(self)

    def __str__(self):
        thread = threading.currentThread()
        try:
            return thread.node_name
        except AttributeError:
            # No per-thread name: use the stored fallback.
            return str.__str__(self)


class Patch(object):
    """Temporarily replace one attribute of an object or class.

    Patch(obj, name=func) installs under 'name' a wrapper calling
    func(original, *args, **kw).  The patch is reverted when the Patch
    instance is garbage-collected, or deterministically via the context
    manager protocol::

        with Patch(obj, meth=lambda orig, *args: ...):
            ...

    Fixes over the previous version: dict.items() instead of the
    Python2-only iteritems(), no shadowing of the 'patch' kwarg dict,
    and an idempotent revert (safe __del__ after an explicit revert).
    """

    def __init__(self, patched, **patch):
        # Exactly one name=callable pair is expected; items() works on
        # both Python 2 and 3 (iteritems() does not).
        (name, patch_func), = patch.items()
        wrapped = getattr(patched, name)
        wrapper = lambda *args, **kw: patch_func(wrapped, *args, **kw)
        # The attribute may be inherited (absent from patched.__dict__),
        # in which case reverting means deleting our override.
        orig = patched.__dict__.get(name)
        setattr(patched, name, wraps(wrapped)(wrapper))
        if orig is None:
            self._revert_cb = lambda: delattr(patched, name)
        else:
            self._revert_cb = lambda: setattr(patched, name, orig)

    def _revert(self):
        # Idempotent: the second and later calls are no-ops, so that
        # __del__ after an explicit revert does nothing.
        cb, self._revert_cb = self._revert_cb, lambda: None
        cb()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self._revert()

    def __del__(self):
        self._revert()


class ConnectionFilter(object):
    """Intercept packets sent through a set of connections.

    Each registered filter is a callable (conn, packet) -> bool; a true
    result delays the packet.  Delayed packets are queued per connection
    and flushed in order once no filter blocks them anymore.
    """

    def __init__(self, *conns):
        # filter callable -> extra objects to keep referenced while the
        # filter is active (presumably Patch instances — see add()).
        self.filter_dict = {}
        self.lock = threading.Lock()
        # [(connection, queue of delayed packets), ...]
        self.conn_list = [(conn, self._patch(conn)) for conn in conns]

    def _patch(self, conn):
        """Override conn._addPacket; return the queue of delayed packets."""
        assert '_addPacket' not in conn.__dict__
        lock = self.lock
        filter_dict = self.filter_dict
        orig = conn.__class__._addPacket
        queue = deque()
        def _addPacket(packet):
            lock.acquire()
            try:
                if not queue:
                    for filter in filter_dict:
                        if filter(conn, packet):
                            break
                    else:
                        # No filter matched: send as usual.
                        return orig(conn, packet)
                # Preserve ordering: once one packet is delayed, every
                # following packet is queued behind it.
                queue.append(packet)
            finally:
                lock.release()
        conn._addPacket = _addPacket
        return queue

    def __call__(self, revert=1):
        # Remove all filters and flush queues; with 'revert', also
        # unpatch the connections for good.
        with self.lock:
            self.filter_dict.clear()
            self._retry()
            if revert:
                for conn, queue in self.conn_list:
                    assert not queue
                    del conn._addPacket
                del self.conn_list[:]

    def _retry(self):
        # Flush each queue until empty, or until a filter delays a packet
        # again — which stops that whole queue, to preserve ordering.
        for conn, queue in self.conn_list:
            while queue:
                packet = queue.popleft()
                for filter in self.filter_dict:
                    if filter(conn, packet):
                        queue.appendleft(packet)
                        break
                else:
                    conn.__class__._addPacket(conn, packet)
                    continue
                break

    def clear(self):
        # Remove all filters and flush, but keep connections patched.
        self(0)

    def add(self, filter, *patches):
        with self.lock:
            self.filter_dict[filter] = patches

    def remove(self, *filters):
        """Remove given filters and retry the delayed packets."""
        with self.lock:
            for filter in filters:
                del self.filter_dict[filter]
            self._retry()

    def __contains__(self, filter):
        return filter in self.filter_dict

class NEOCluster(object):
    """Set up and drive a whole NEO cluster (masters, storages, admin,
    client and neoctl) inside a single process, with all threads
    scheduled cooperatively by Serialized."""

    # Original implementations saved so that _patch/_unpatch can install
    # and remove the (global) test-specific monkey-patches.
    BaseConnection_checkTimeout = staticmethod(BaseConnection.checkTimeout)
    SocketConnector_makeClientConnection = staticmethod(
        SocketConnector.makeClientConnection)
    SocketConnector_makeListeningConnection = staticmethod(
        SocketConnector.makeListeningConnection)
    SocketConnector_send = staticmethod(SocketConnector.send)
    Storage__init__ = staticmethod(Storage.__init__)
    # Number of clusters currently patched (patches are process-global).
    _patch_count = 0
    # (resource kind, value) -> owning cluster; prevents name/db
    # collisions between concurrently existing clusters.
    _resource_dict = weakref.WeakValueDictionary()

    def _allocate(self, resource, new):
        """Draw values from 'new' until one is free, and reserve it."""
        result = resource, new()
        while result in self._resource_dict:
            result = resource, new()
        self._resource_dict[result] = self
        return result[1]

    def _patch(cluster):
        # NB: 'cluster' plays the role of 'self' here.
        cls = cluster.__class__
        cls._patch_count += 1
        if cls._patch_count > 1:
            # Already patched for another running cluster.
            return
        def makeClientConnection(self, addr):
            # Resolve virtual addresses (see ServerNode.resolv) but keep
            # the virtual one as the recorded remote address.
            real_addr = ServerNode.resolv(addr)
            try:
                return cls.SocketConnector_makeClientConnection(self, real_addr)
            finally:
                self.remote_addr = addr
        def send(self, msg):
            result = cls.SocketConnector_send(self, msg)
            # Anything sent on the wire means pending activity.
            if Serialized.pending is not None:
                Serialized.pending = 1
            return result
        # TODO: 'sleep' should 'tic' in a smart way, so that storages can be
        #       safely started even if the cluster isn't.
        bootstrap.sleep = lambda seconds: None
        BaseConnection.checkTimeout = lambda self, t: None
        SocketConnector.makeClientConnection = makeClientConnection
        SocketConnector.makeListeningConnection = lambda self, addr: \
            cls.SocketConnector_makeListeningConnection(self, BIND)
        SocketConnector.send = send
        Storage.setupLog = lambda *args, **kw: None
        Serialized.init()

    @classmethod
    def _unpatch(cls):
        assert cls._patch_count > 0
        cls._patch_count -= 1
        if cls._patch_count:
            # Another cluster still needs the patches.
            return
        bootstrap.sleep = time.sleep
        BaseConnection.checkTimeout = cls.BaseConnection_checkTimeout
        SocketConnector.makeClientConnection = \
            cls.SocketConnector_makeClientConnection
        SocketConnector.makeListeningConnection = \
            cls.SocketConnector_makeListeningConnection
        SocketConnector.send = cls.SocketConnector_send
        Storage.setupLog = setupLog

    def __init__(self, master_count=1, partitions=1, replicas=0,
                       adapter=os.getenv('NEO_TESTS_ADAPTER', 'BTree'),
                       storage_count=None, db_list=None, clear_databases=True,
                       db_user=DB_USER, db_password='', verbose=None):
        if verbose is not None:
            temp_dir = os.getenv('TEMP') or \
                os.path.join(tempfile.gettempdir(), 'neo_tests')
            os.path.exists(temp_dir) or os.makedirs(temp_dir)
            log_file = tempfile.mkstemp('.log', '', temp_dir)[1]
            print 'Logging to %r' % log_file
            setupLog(LoggerThreadName(), log_file, verbose)
        # Unique cluster name, to avoid clashes between live clusters.
        self.name = 'neo_%s' % self._allocate('name',
            lambda: random.randint(0, 100))
        master_list = [MasterApplication.newAddress()
                       for _ in xrange(master_count)]
        self.master_nodes = ' '.join('%s:%s' % x for x in master_list)
        # Weak references so that dropping the cluster releases the nodes.
        weak_self = weakref.proxy(self)
        kw = dict(cluster=weak_self, getReplicas=replicas, getAdapter=adapter,
                  getPartitions=partitions, getReset=clear_databases)
        self.master_list = [MasterApplication(address=x, **kw)
                            for x in master_list]
        if db_list is None:
            if storage_count is None:
                # One storage per copy of the data.
                storage_count = replicas + 1
            index = count().next
            db_list = ['%s%u' % (DB_PREFIX, self._allocate('db', index))
                       for _ in xrange(storage_count)]
        setupMySQLdb(db_list, db_user, db_password, clear_databases)
        db = '%s:%s@%%s' % (db_user, db_password)
        self.storage_list = [StorageApplication(getDatabase=db % x, **kw)
                             for x in db_list]
        self.admin_list = [AdminApplication(**kw)]
        self.client = ClientApplication(weak_self)
        self.neoctl = NeoCTL(weak_self)

    # A few shortcuts that work when there's only 1 master/storage/admin
    @property
    def master(self):
        master, = self.master_list
        return master
    @property
    def storage(self):
        storage, = self.storage_list
        return storage
    @property
    def admin(self):
        admin, = self.admin_list
        return admin
    ###

    def reset(self, clear_database=False):
        """Recreate all nodes (which must be stopped) for a new run."""
        for node_type in 'master', 'storage', 'admin':
            kw = {}
            if node_type == 'storage':
                kw['clear_database'] = clear_database
            for node in getattr(self, node_type + '_list'):
                node.resetNode(**kw)
        self.client = ClientApplication(self)
        self.neoctl = NeoCTL(weakref.proxy(self))

    def start(self, storage_list=None, fast_startup=False):
        """Start all nodes and wait for the cluster to be RUNNING.

        With fast_startup, the cluster is started before the storages;
        otherwise only after them.
        """
        self._patch()
        for node_type in 'master', 'admin':
            for node in getattr(self, node_type + '_list'):
                node.start()
        self.tic()
        if fast_startup:
            self._startCluster()
        if storage_list is None:
            storage_list = self.storage_list
        for node in storage_list:
            node.start()
        self.tic()
        if not fast_startup:
            self._startCluster()
            self.tic()
        state = self.neoctl.getClusterState()
        assert state == ClusterStates.RUNNING, state
        self.enableStorageList(storage_list)

    def _startCluster(self):
        try:
            self.neoctl.startCluster()
        except RuntimeError:
            # The cluster may already be coming up by itself: only
            # propagate if it really failed to start.
            self.tic()
            if self.neoctl.getClusterState() not in (
                      ClusterStates.RUNNING,
                      ClusterStates.VERIFYING,
                  ):
                raise

    def enableStorageList(self, storage_list):
        self.neoctl.enableStorageList([x.uuid for x in storage_list])
        self.tic()
        for node in storage_list:
            assert self.getNodeState(node) == NodeStates.RUNNING

    @property
    def db(self):
        # Lazily-opened ZODB database on top of this cluster.
        try:
            return self._db
        except AttributeError:
            self._db = db = ZODB.DB(storage=self.getZODBStorage())
            return db

    def stop(self):
        # Close the ZODB DB if one was opened, else the raw client app.
        self.__dict__.pop('_db', self.client).close()
        #self.neoctl.setClusterState(ClusterStates.STOPPING) # TODO
        try:
            # Ask all node threads to exit (see Serialized.acquire)
            # and wait for them.
            Serialized.release(stop=
                self.admin_list + self.storage_list + self.master_list)
            for node_type in 'admin', 'storage', 'master':
                for node in getattr(self, node_type + '_list'):
                    if node.isAlive():
                        node.join()
        finally:
            Serialized.acquire()
        self._unpatch()

    def tic(self, force=False):
        # Let nodes run, round-robin, until the cluster is idle.
        if force:
            Serialized.tic()
        while Serialized.pending:
            Serialized.tic()

    def getNodeState(self, node):
        uuid = node.uuid
        # Row layout of getNodeList: index 2 is the UUID, 3 the state.
        for node in self.neoctl.getNodeList(node.node_type):
            if node[2] == uuid:
                return node[3]

    def getOudatedCells(self):
        # NOTE(review): typo ('Oudated') kept — callers depend on it.
        return [cell for row in self.neoctl.getPartitionRowList()[1]
                     for cell in row[1]
                     if cell[1] == CellStates.OUT_OF_DATE]

    def getZODBStorage(self, **kw):
        # automatically put client in master mode
        if self.client.em._timeout == 0:
            self.client.setPoll(True)
        return Storage.Storage(None, self.name, _app=self.client, **kw)

    def populate(self, dummy_zodb=None):
        """Return a function importing 'count' transactions from a
        generated dummy ZODB into this cluster."""
        if dummy_zodb is None:
            from ..stat_zodb import PROD1
            dummy_zodb = PROD1()
        importFrom = self.getZODBStorage().importFrom
        # Shared across calls so successive imports resume incrementally.
        preindex = {}
        as_storage = dummy_zodb.as_storage
        return lambda count: importFrom(as_storage(count), preindex=preindex)

    def getTransaction(self):
        # Fresh transaction manager + connection pair.
        txn = transaction.TransactionManager()
        return txn, self.db.open(transaction_manager=txn)

    def __del__(self):
        self.neoctl.close()
        for node_type in 'admin', 'storage', 'master':
            for node in getattr(self, node_type + '_list'):
                node.close()
        self.client.em.close()

    def extraCellSortKey(self, key):
        # Patch the client's cell sort key so 'key' acts as tie-breaker.
        return Patch(self.client.cp, _getCellSortKey=lambda orig, *args:
            (orig(*args), key(*args)))

class NEOThreadedTest(NeoTestBase):
    """Base class for tests running a threaded NEO cluster."""

    def setupLog(self):
        # One log file per test, named after the test id.
        log_file = os.path.join(getTempDirectory(), self.id() + '.log')
        setupLog(LoggerThreadName(), log_file, True)

    def getUnpickler(self, conn):
        """Return a function unpickling object records the way the
        given ZODB connection would."""
        reader = conn._reader
        def unpickler(data, compression=False):
            if compression:
                data = decompress(data)
            obj = reader.getGhost(data)
            reader.setGhostState(obj, data)
            return obj
        return unpickler

    class newThread(threading.Thread):
        """Daemon thread that starts immediately and re-raises, on
        join(), any exception raised by its target."""

        def __init__(self, func, *args, **kw):
            threading.Thread.__init__(self)
            self.__target = func, args, kw
            self.daemon = True
            self.start()

        def run(self):
            try:
                apply(*self.__target)
                self.__exc_info = None
            except:
                # Saved for re-raise in join().
                self.__exc_info = sys.exc_info()

        def join(self, timeout=None):
            threading.Thread.join(self, timeout)
            if not self.isAlive() and self.__exc_info:
                etype, value, tb = self.__exc_info
                # Drop the stored reference before raising, to break the
                # reference cycle through the traceback.
                del self.__exc_info
                raise etype, value, tb