#
# Copyright (C) 2011-2015  Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# XXX: Consider using ClusterStates.STOPPING to stop clusters

import os, random, socket, sys, tempfile, threading, time, types, weakref
20
import traceback
21
from collections import deque
22
from ConfigParser import SafeConfigParser
23
from contextlib import contextmanager
24
from itertools import count
25
from functools import wraps
26
from zlib import decompress
27 28 29 30 31
from mock import Mock
import transaction, ZODB
import neo.admin.app, neo.master.app, neo.storage.app
import neo.client.app, neo.neoctl.app
from neo.client import Storage
32
from neo.lib import bootstrap, logging
33
from neo.lib.connection import BaseConnection, Connection
34
from neo.lib.connector import SocketConnector, \
35
    ConnectorConnectionRefusedException, ConnectorTryAgainException
36
from neo.lib.event import EventManager
37
from neo.lib.protocol import CellStates, ClusterStates, NodeStates, NodeTypes
38
from neo.lib.util import SOCKET_CONNECTORS_DICT, parseMasterList, p64
39
from .. import NeoTestBase, getTempDirectory, setupMySQLdb, \
40
    ADDRESS_TYPE, IP_VERSION_FORMAT_DICT, DB_PREFIX, DB_USER
41 42 43 44 45 46 47

# Address really used to bind listening sockets (port 0: let the OS choose);
# addresses seen by the cluster are virtual ones (see ServerNode.resolv).
BIND = IP_VERSION_FORMAT_DICT[ADDRESS_TYPE], 0
# Packed (binary) form of the test IP, used to derive per-class virtual IPs.
LOCAL_IP = socket.inet_pton(ADDRESS_TYPE, IP_VERSION_FORMAT_DICT[ADDRESS_TYPE])


class Serialized(object):
    """Cooperative scheduler making the multithreaded cluster deterministic.

    Every node thread (plus the main test thread) registers a lock and only
    runs while it holds it; control is handed over through release()/acquire()
    pairs.  'pending' is either a flag/counter (0, 1 or a poll timeout)
    telling whether some node may still have work to do, or a frozenset of
    nodes that were asked to stop.
    """

    @classmethod
    def init(cls):
        # The global lock is created already acquired: the main (test)
        # thread is the initial owner of the scheduler.
        cls._global_lock = threading.Lock()
        cls._global_lock.acquire()
        # FIFO of locks of suspended threads, protected by _lock_lock.
        cls._lock_list = deque()
        cls._lock_lock = threading.Lock()
        cls._pdb = False
        cls.pending = 0

    @classmethod
    def release(cls, lock=None, wake_other=True, stop=None):
        """Suspend lock owner and resume first suspended thread"""
        if lock is None:
            # No lock given: the caller is the main thread.
            lock = cls._global_lock
            if stop:
                # Ask the given nodes to exit (checked in acquire()).
                cls.pending = frozenset(stop)
            else:
                cls.pending = 0
        try:
            # If the caller is currently traced by a debugger, make the
            # debugger follow execution into whichever thread resumes next.
            sys._getframe(1).f_trace.im_self.set_continue()
            cls._pdb = True
        except AttributeError:
            pass
        q = cls._lock_list
        l = cls._lock_lock
        l.acquire()
        try:
            # Park our lock at the end of the queue and wake the thread
            # that has been waiting the longest.
            q.append(lock)
            if wake_other:
                q.popleft().release()
        finally:
            l.release()

    @classmethod
    def acquire(cls, lock=None):
        """Suspend all threads except lock owner"""
        if lock is None:
            lock = cls._global_lock
        # Blocks until release() on another thread hands control back.
        lock.acquire()
        pending = cls.pending # XXX: getattr once to avoid race conditions
        if type(pending) is frozenset:
            if lock is cls._global_lock:
                # Back in the main thread: the stop request is over.
                cls.pending = 0
            elif threading.currentThread() in pending:
                # This node was asked to stop: terminate its thread.
                sys.exit()
        if cls._pdb:
            cls._pdb = False
            try:
                sys.stdout.write(threading.currentThread().node_name)
            except AttributeError:
                pass
            # NOTE(review): 'pdb' is not among this module's visible imports
            # -- presumably a debugging helper; confirm the name resolves.
            pdb(1)

    @classmethod
    def tic(cls, lock=None):
        # switch to another thread
        # (the following calls are not supposed to be debugged into)
        cls.release(lock); cls.acquire(lock)

    @classmethod
    def background(cls):
        # Resume a suspended thread (if any) without suspending the caller:
        # used when a thread terminates or runs outside the scheduler.
        with cls._lock_lock:
            if cls._lock_list:
                cls._lock_list.popleft().release()

class SerializedEventManager(EventManager):
    """EventManager variant whose polling cooperates with Serialized.

    Instances are never created directly: 'decorate' temporarily replaces
    EventManager.__init__ so that the application being constructed gets an
    event manager of this class.
    """

    # Lock identifying this node's turn in the Serialized scheduler.
    _lock = None
    # Poll timeout used while the node runs in active ('master') mode.
    _timeout = 0

    @classmethod
    def decorate(cls, func):
        """Decorator for constructors of applications to serialize."""
        def decorator(*args, **kw):
            try:
                # Swap EventManager.__init__ for the duration of the call
                # (Python 2 unbound-method dance via im_func).
                EventManager.__init__ = types.MethodType(
                    cls.__init__.im_func, None, EventManager)
                return func(*args, **kw)
            finally:
                EventManager.__init__ = types.MethodType(
                    cls._super__init__.im_func, None, EventManager)
        return wraps(func)(decorator)

    _super__init__ = EventManager.__init__.im_func

    def __init__(self):
        # Runs in place of EventManager.__init__ (see decorate): rebrand
        # the instance as a SerializedEventManager, then init normally.
        cls = self.__class__
        assert cls is EventManager
        self.__class__ = SerializedEventManager
        self._super__init__()

    def _poll(self, timeout=1):
        # Poll cooperatively, yielding control between node threads.
        if self._pending_processing:
            assert timeout <= 0
        elif 0 == self._timeout == timeout == Serialized.pending == len(
            self.writer_set):
            # Nothing to write, nothing pending: skip polling entirely.
            return
        else:
            if self.writer_set and Serialized.pending == 0:
                # A message is about to be sent: keep the scheduler busy.
                Serialized.pending = 1
            # Jump to another thread before polling, so that when a message is
            # sent on the network, one can debug immediately the receiving part.
            # XXX: Unfortunately, this means we have a useless full-cycle
            #      before the first message is sent.
            # TODO: Detect where a message is sent to jump immediately to nodes
            #       that will do something.
            Serialized.tic(self._lock)
            if timeout != 0:
                timeout = self._timeout
                if timeout != 0 and Serialized.pending == 1:
                    Serialized.pending = timeout = 0
        EventManager._poll(self, timeout)

    def addReader(self, conn):
        # A new reader means an event may be coming: mark the scheduler
        # busy, unless a stop request (frozenset) is in progress.
        EventManager.addReader(self, conn)
        if type(Serialized.pending) is not frozenset:
            Serialized.pending = 1

class Node(object):
    """Mixin giving test nodes helpers to inspect and filter their
    connections towards other nodes."""

    def getConnectionList(self, *peers):
        """Iterate over this node's connections to the given peer nodes."""
        def endpoint(connector):
            # A connector is identified by the address it was accepted
            # from, or else by the address it connected to.
            return connector and (connector.accepted_from
                                  or connector.getAddress())
        # Collect the endpoints of every connection the peers have.
        peer_addr_set = set()
        for peer in peers:
            for conn in peer.em.connection_dict.itervalues():
                if isinstance(conn, Connection):
                    peer_addr_set.add(endpoint(conn.connector))
        peer_addr_set.discard(None)
        # Yield our own connections whose endpoint matches one of them.
        return (conn for conn in self.em.connection_dict.itervalues()
                if isinstance(conn, Connection)
                   and endpoint(conn.connector) in peer_addr_set)

    def filterConnection(self, *peers):
        """Return a ConnectionFilter over connections to the given peers."""
        return ConnectionFilter(self.getConnectionList(*peers))

class ServerNode(Node):
    """Base class running a NEO server application in a daemon thread.

    Each concrete subclass (Master/Storage/AdminApplication) is given its
    own virtual IP; a node's virtual 'port' is its index in the per-class
    node list, and resolv() maps such virtual addresses to the real
    listening address.
    """

    # virtual IP -> concrete ServerNode subclass
    _server_class_dict = {}

    class __metaclass__(type):
        def __init__(cls, name, bases, d):
            type.__init__(cls, name, bases, d)
            # Only set up concrete subclasses (not ServerNode itself, nor
            # already-processed ones): make them threads and give them a
            # virtual IP of their own.
            if Node not in bases and threading.Thread not in cls.__mro__:
                cls.__bases__ = bases + (threading.Thread,)
                # 'FooApplication' -> NodeTypes.FOO
                cls.node_type = getattr(NodeTypes, name[:-11].upper())
                cls._node_list = []
                cls._virtual_ip = socket.inet_ntop(ADDRESS_TYPE,
                    LOCAL_IP[:-1] + chr(2 + len(cls._server_class_dict)))
                cls._server_class_dict[cls._virtual_ip] = cls

    @staticmethod
    def resetPorts():
        # Forget all nodes so that virtual ports (list indexes) are reused.
        for cls in ServerNode._server_class_dict.itervalues():
            del cls._node_list[:]

    @classmethod
    def newAddress(cls):
        """Allocate a new virtual (ip, port) for a node of this class."""
        address = cls._virtual_ip, len(cls._node_list)
        cls._node_list.append(None)
        return address

    @classmethod
    def resolv(cls, address):
        """Translate a virtual address into the node's real one."""
        try:
            cls = cls._server_class_dict[address[0]]
        except KeyError:
            # Not a virtual address: return it unchanged.
            return address
        return cls._node_list[address[1]].getListeningAddress()

    @SerializedEventManager.decorate
    def __init__(self, cluster=None, address=None, **kw):
        if not address:
            address = self.newAddress()
        if cluster is None:
            master_nodes = kw['master_nodes']
            name = kw['name']
        else:
            master_nodes = kw.get('master_nodes', cluster.master_nodes)
            name = kw.get('name', cluster.name)
        port = address[1]
        # Use a weak proxy so a dead node is not kept alive through the
        # class-level node list.
        self._node_list[port] = weakref.proxy(self)
        # Remember constructor arguments so resetNode() can rebuild us.
        self._init_args = init_args = kw.copy()
        init_args['cluster'] = cluster
        init_args['address'] = address
        threading.Thread.__init__(self)
        self.daemon = True
        self.node_name = '%s_%u' % (self.node_type, port)
        # The application reads its configuration through getter methods,
        # hence the Mock configured with get* keys.
        kw.update(getCluster=name, getBind=address,
                  getMasters=parseMasterList(master_nodes, address))
        super(ServerNode, self).__init__(Mock(kw))

    def getVirtualAddress(self):
        # The (virtual_ip, port) this node was created with.
        return self._init_args['address']

    def resetNode(self):
        """Rebuild the application in-place, keeping its virtual address."""
        assert not self.isAlive()
        kw = self._init_args
        self.__dict__.clear()
        self.__init__(**kw)

    def start(self):
        # Register this node's lock with the scheduler before the thread
        # actually runs, so the first acquire() in run() suspends it.
        Serialized.pending = 1
        self.em._lock = l = threading.Lock()
        l.acquire()
        Serialized.release(l, wake_other=0)
        threading.Thread.start(self)

    def run(self):
        try:
            # Wait for our turn before entering the application main loop.
            Serialized.acquire(self.em._lock)
            super(ServerNode, self).run()
        finally:
            self._afterRun()
            logging.debug('stopping %r', self)
            # Hand control over to another thread.
            Serialized.background()

    def _afterRun(self):
        try:
            self.listening_conn.close()
        except AttributeError:
            # The node may have stopped before it started listening.
            pass

    def stop(self):
        """Ask the node thread to exit and wait for it."""
        try:
            Serialized.release(stop=(self,))
            self.join()
        finally:
            Serialized.acquire()

    def getListeningAddress(self):
        try:
            return self.listening_conn.getAddress()
        except AttributeError:
            # Not listening: behave like a real 'connection refused'.
            raise ConnectorConnectionRefusedException

class AdminApplication(ServerNode, neo.admin.app.Application):
    """Admin node of a threaded test cluster."""
    pass

class MasterApplication(ServerNode, neo.master.app.Application):
    """Master node of a threaded test cluster."""
    pass

class StorageApplication(ServerNode, neo.storage.app.Application):
    """Storage node of a threaded test cluster, with database helpers."""

    def resetNode(self, clear_database=False):
        # Record whether the database must be reset on next start.
        self._init_args['getReset'] = clear_database
        dm = self.dm
        super(StorageApplication, self).resetNode()
        if dm and not clear_database:
            # Keep the already-open database manager across the reset.
            self.dm = dm

    def _afterRun(self):
        super(StorageApplication, self)._afterRun()
        try:
            self.dm.close()
            self.dm = None
        except StandardError: # AttributeError & ProgrammingError
            pass

    def getAdapter(self):
        # Name of the database adapter ('SQLite', 'MySQL', 'Importer'...).
        return self._init_args['getAdapter']

    def switchTables(self):
        """Swap the permanent and temporary 'trans'/'obj' tables."""
        q = self.dm.query
        for table in 'trans', 'obj':
            q('ALTER TABLE %s RENAME TO tmp' % table)
            q('ALTER TABLE t%s RENAME TO %s' % (table, table))
            q('ALTER TABLE tmp RENAME TO t%s' % table)

    def getDataLockInfo(self):
        """Map each (hash, compression) data row to its entry in
        _uncommitted_data (0 when absent).

        NOTE(review): the values look like refcounts of uncommitted
        users of each data row -- confirm against the DatabaseManager.
        """
        dm = self.dm
        index = tuple(dm.query("SELECT id, hash, compression FROM data"))
        assert set(dm._uncommitted_data).issubset(x[0] for x in index)
        get = dm._uncommitted_data.get
        return {(str(h), c & 0x7f): get(i, 0) for i, h, c in index}

    def sqlCount(self, table):
        """Return the number of rows in the given table."""
        (r,), = self.dm.query("SELECT COUNT(*) FROM " + table)
        return r

class ClientApplication(Node, neo.client.app.Application):
    """Client application whose polling cooperates with Serialized."""

    @SerializedEventManager.decorate
    def __init__(self, master_nodes, name, **kw):
        super(ClientApplication, self).__init__(master_nodes, name, **kw)
        self.em._lock = threading.Lock()

    def setPoll(self, master=False):
        """Switch who drives the scheduler for this client.

        master=True: the client's poll thread drives (needed while the
        test thread performs blocking ZODB calls); master=False: the test
        thread drives again.
        """
        if master:
            self.em._timeout = 1
            if not self.em._lock.acquire(0):
                Serialized.background()
        else:
            Serialized.release(wake_other=0); Serialized.acquire()
            self.em._timeout = 0

    def __del__(self):
        try:
            super(ClientApplication, self).__del__()
        finally:
            if self.poll_thread.isAlive():
                # Let the scheduler resume another node.
                Serialized.background()
    close = __del__

    def getConnectionList(self, *peers):
        # Unlike server nodes, the client can look its peers up directly.
        for peer in peers:
            if isinstance(peer, MasterApplication):
                conn = self._getMasterConnection()
            else:
                assert isinstance(peer, StorageApplication)
                conn = self.cp.getConnForNode(self.nm.getByUUID(peer.uuid))
            yield conn

class NeoCTL(neo.neoctl.app.NeoCTL):
    """neoctl client whose event manager is serialized for tests."""

    @SerializedEventManager.decorate
    def __init__(self, *args, **kw):
        super(NeoCTL, self).__init__(*args, **kw)
        # NOTE(review): -1 presumably means 'no timeout' for the poll
        # loop, as neoctl is driven synchronously by the test thread --
        # confirm against SerializedEventManager._poll.
        self.em._timeout = -1

class LoggerThreadName(str):
    """A str-like object whose value follows the current thread.

    Converting it to a string yields the ``node_name`` of the calling
    thread, falling back to the value given at creation time (e.g. in the
    main thread of the test runner).  Hashing is identity-based so that
    distinct instances never collide as dictionary keys.
    """

    def __new__(cls, default='TEST'):
        # The fallback value is stored as the underlying str content.
        return str.__new__(cls, default)

    def __getattribute__(self, attr):
        # Delegate every attribute access to the per-thread value, so
        # that e.g. string operations see the current thread's name.
        value = str(self)
        return getattr(value, attr)

    def __hash__(self):
        # Identity, not content: the rendered name changes per thread.
        return id(self)

    def __str__(self):
        thread = threading.currentThread()
        try:
            return thread.node_name
        except AttributeError:
            # Thread without a node_name (e.g. main thread): use default.
            return str.__str__(self)

class Patch(object):
    """Monkey-patch an attribute for the lifetime of this object.

    Patch(obj, name=lambda orig, *args, **kw: ...) replaces obj.name by a
    wrapper that receives the original attribute as first argument.  The
    patch is reverted when the Patch instance is garbage-collected.
    """

    def __init__(self, patched, **patch):
        # Exactly one keyword argument is expected: attribute name ->
        # wrapper taking the original value as first argument.
        (name, patch), = patch.iteritems()
        wrapped = getattr(patched, name)
        wrapper = lambda *args, **kw: patch(wrapped, *args, **kw)
        # Look in __dict__ (not getattr) to tell an attribute defined on
        # 'patched' itself from an inherited one, so revert restores the
        # exact previous state (delattr falls back to the parent class).
        orig = patched.__dict__.get(name)
        setattr(patched, name, wraps(wrapped)(wrapper))
        if orig is None:
            self._revert = lambda: delattr(patched, name)
        else:
            self._revert = lambda: setattr(patched, name, orig)

    def __del__(self):
        self._revert()


class ConnectionFilter(object):
    """Context manager intercepting packets on selected connections.

    While at least one filter is active, Connection._addPacket is globally
    patched: a packet for which a registered filter callable returns True
    is withheld in a per-connection queue instead of being sent.  Queued
    packets are retried, in order, whenever a filter callable is removed
    or the context exits.
    """

    # Number of packets this filter has withheld.
    filtered_count = 0
    # Class-level state shared by all active filters (contexts may nest).
    filter_list = []
    filter_queue = weakref.WeakKeyDictionary()
    lock = threading.Lock()
    _addPacket = Connection._addPacket

    @contextmanager
    def __new__(cls, conn_list=()):
        # __new__ as a context manager: 'with ConnectionFilter(...) as f'.
        self = object.__new__(cls)
        # filter callable -> associated patches (see add()).
        self.filter_dict = {}
        # An empty conn_list means 'match any connection'.
        self.conn_list = frozenset(conn_list)
        if not cls.filter_list:
            # First active filter: install the global patch.
            def _addPacket(conn, packet):
                with cls.lock:
                    try:
                        queue = cls.filter_queue[conn]
                    except KeyError:
                        # Nothing queued yet for this connection: ask
                        # every active filter whether to withhold it.
                        for self in cls.filter_list:
                            if self(conn, packet):
                                self.filtered_count += 1
                                break
                        else:
                            return cls._addPacket(conn, packet)
                        cls.filter_queue[conn] = queue = deque()
                    # Queue a shallow copy, since the caller may mutate
                    # the original packet object afterwards.
                    p = packet.__new__(packet.__class__)
                    p.__dict__.update(packet.__dict__)
                    queue.append(p)
            Connection._addPacket = _addPacket
        try:
            cls.filter_list.append(self)
            yield self
        finally:
            del cls.filter_list[-1:]
            if not cls.filter_list:
                # Last filter gone: restore the original implementation.
                Connection._addPacket = cls._addPacket.im_func
        # Flush whatever is no longer withheld by any remaining filter.
        with cls.lock:
            cls._retry()

    def __call__(self, conn, packet):
        """Return True if this filter withholds 'packet' on 'conn'."""
        if not self.conn_list or conn in self.conn_list:
            for filter in self.filter_dict:
                if filter(conn, packet):
                    return True
        return False

    @classmethod
    def _retry(cls):
        # Deliver queued packets in order, stopping at the first one that
        # is still withheld: per-connection ordering must be preserved.
        for conn, queue in cls.filter_queue.items():
            while queue:
                packet = queue.popleft()
                for self in cls.filter_list:
                    if self(conn, packet):
                        queue.appendleft(packet)
                        break
                else:
                    cls._addPacket(conn, packet)
                    continue
                break
            else:
                del cls.filter_queue[conn]

    def add(self, filter, *patches):
        """Register a filter callable.

        NOTE(review): 'patches' are only stored, apparently to tie their
        lifetime to the filter -- confirm with callers.
        """
        with self.lock:
            self.filter_dict[filter] = patches

    def remove(self, *filters):
        """Unregister filters and retry the packets they were withholding."""
        with self.lock:
            for filter in filters:
                del self.filter_dict[filter]
            self._retry()

    def __contains__(self, filter):
        return filter in self.filter_dict

class NEOCluster(object):
    """A whole NEO cluster (masters, storages, admin, client and neoctl)
    running in threads, scheduled deterministically by Serialized."""

    # Original implementations, saved so _unpatch() can restore them.
    BaseConnection_checkTimeout = staticmethod(BaseConnection.checkTimeout)
    SocketConnector_makeClientConnection = staticmethod(
        SocketConnector.makeClientConnection)
    SocketConnector_makeListeningConnection = staticmethod(
        SocketConnector.makeListeningConnection)
    SocketConnector_receive = staticmethod(SocketConnector.receive)
    SocketConnector_send = staticmethod(SocketConnector.send)
    _patch_count = 0
    # (resource, value) -> owning cluster; weak values let allocations be
    # reused once the owning cluster is garbage-collected.
    _resource_dict = weakref.WeakValueDictionary()

    def _allocate(self, resource, new):
        """Draw values from 'new' until one is unused, and reserve it."""
        result = resource, new()
        while result in self._resource_dict:
            result = resource, new()
        self._resource_dict[result] = self
        return result[1]

    @staticmethod
    def _patch():
        """Globally patch the network layer for in-process clusters.

        Reference-counted: only the first call installs the patches.
        """
        cls = NEOCluster
        cls._patch_count += 1
        if cls._patch_count > 1:
            return
        def makeClientConnection(self, addr):
            # Translate virtual addresses to real listening ones, while
            # remembering the virtual address the caller asked for.
            real_addr = ServerNode.resolv(addr)
            try:
                return cls.SocketConnector_makeClientConnection(self, real_addr)
            finally:
                self.remote_addr = addr
        def send(self, msg):
            # Sending data means the receiver will have work to do.
            result = cls.SocketConnector_send(self, msg)
            if type(Serialized.pending) is not frozenset:
                Serialized.pending = 1
            return result
        def receive(self):
            # If the peer sent an entire packet, make sure we read it entirely,
            # otherwise Serialize.pending would be reset to 0.
            data = ''
            try:
                while True:
                    d = cls.SocketConnector_receive(self)
                    if not d:
                        return data
                    data += d
            except ConnectorTryAgainException:
                if data:
                    return data
                raise
        # TODO: 'sleep' should 'tic' in a smart way, so that storages can be
        #       safely started even if the cluster isn't.
        bootstrap.sleep = lambda seconds: None
        # Timeouts are meaningless under deterministic scheduling.
        BaseConnection.checkTimeout = lambda self, t: None
        SocketConnector.makeClientConnection = makeClientConnection
        # Always bind on the real local address; virtual ones are resolved
        # on the connecting side.
        SocketConnector.makeListeningConnection = lambda self, addr: \
            cls.SocketConnector_makeListeningConnection(self, BIND)
        SocketConnector.receive = receive
        SocketConnector.send = send
        Serialized.init()

    @staticmethod
    def _unpatch():
        """Undo _patch() once the last cluster stopped."""
        cls = NEOCluster
        assert cls._patch_count > 0
        cls._patch_count -= 1
        if cls._patch_count:
            return
        bootstrap.sleep = time.sleep
        BaseConnection.checkTimeout = cls.BaseConnection_checkTimeout
        SocketConnector.makeClientConnection = \
            cls.SocketConnector_makeClientConnection
        SocketConnector.makeListeningConnection = \
            cls.SocketConnector_makeListeningConnection
        SocketConnector.receive = cls.SocketConnector_receive
        SocketConnector.send = cls.SocketConnector_send

    def __init__(self, master_count=1, partitions=1, replicas=0, upstream=None,
                       adapter=os.getenv('NEO_TESTS_ADAPTER', 'SQLite'),
                       storage_count=None, db_list=None, clear_databases=True,
                       db_user=DB_USER, db_password='', compress=True,
                       importer=None):
        # Pick a cluster name not used by any live cluster.
        self.name = 'neo_%s' % self._allocate('name',
            lambda: random.randint(0, 100))
        master_list = [MasterApplication.newAddress()
                       for _ in xrange(master_count)]
        self.master_nodes = ' '.join('%s:%s' % x for x in master_list)
        # Nodes hold a reference to the cluster: keep it weak so the
        # cluster can be garbage-collected.
        weak_self = weakref.proxy(self)
        kw = dict(cluster=weak_self, getReplicas=replicas, getAdapter=adapter,
                  getPartitions=partitions, getReset=clear_databases)
        if upstream is not None:
            # Backup cluster configuration: replicate from 'upstream'.
            self.upstream = weakref.proxy(upstream)
            kw.update(getUpstreamCluster=upstream.name,
                getUpstreamMasters=parseMasterList(upstream.master_nodes))
        self.master_list = [MasterApplication(address=x, **kw)
                            for x in master_list]
        if db_list is None:
            if storage_count is None:
                storage_count = replicas + 1
            index = count().next
            db_list = ['%s%u' % (DB_PREFIX, self._allocate('db', index))
                       for _ in xrange(storage_count)]
        if adapter == 'MySQL':
            setupMySQLdb(db_list, db_user, db_password, clear_databases)
            # 'db' is a template; '%%s' is filled with the database name.
            db = '%s:%s@%%s' % (db_user, db_password)
        elif adapter == 'SQLite':
            db = os.path.join(getTempDirectory(), '%s.sqlite')
        else:
            assert False, adapter
        if importer:
            # Wrap the configured adapter in an 'Importer' configuration
            # file importing the given ZODBs.
            cfg = SafeConfigParser()
            cfg.add_section("neo")
            cfg.set("neo", "adapter", adapter)
            cfg.set("neo", "database", db % tuple(db_list))
            for name, zodb in importer:
                cfg.add_section(name)
                for x in zodb.iteritems():
                    cfg.set(name, *x)
            db = os.path.join(getTempDirectory(), '%s.conf')
            with open(db % tuple(db_list), "w") as f:
                cfg.write(f)
            kw["getAdapter"] = "Importer"
        self.storage_list = [StorageApplication(getDatabase=db % x, **kw)
                             for x in db_list]
        self.admin_list = [AdminApplication(**kw)]
        self.client = ClientApplication(name=self.name,
            master_nodes=self.master_nodes, compress=compress)
        self.neoctl = NeoCTL(self.admin.getVirtualAddress())

    # A few shortcuts that work when there's only 1 master/storage/admin
    @property
    def master(self):
        master, = self.master_list
        return master
    @property
    def storage(self):
        storage, = self.storage_list
        return storage
    @property
    def admin(self):
        admin, = self.admin_list
        return admin
    ###

    @property
    def primary_master(self):
        # Exactly one master must be primary.
        master, = [master for master in self.master_list if master.primary]
        return master

    def reset(self, clear_database=False):
        """Recreate all node applications (and a fresh client/neoctl)."""
        for node_type in 'master', 'storage', 'admin':
            kw = {}
            if node_type == 'storage':
                kw['clear_database'] = clear_database
            for node in getattr(self, node_type + '_list'):
                node.resetNode(**kw)
        self.client = ClientApplication(name=self.name,
            master_nodes=self.master_nodes)
        self.neoctl = NeoCTL(self.admin.getVirtualAddress())

    def start(self, storage_list=None, fast_startup=False):
        """Start all nodes and wait for the cluster to be operational."""
        self._patch()
        for node_type in 'master', 'admin':
            for node in getattr(self, node_type + '_list'):
                node.start()
        self.tic()
        if fast_startup:
            # Start the cluster before storages are up.
            self._startCluster()
        if storage_list is None:
            storage_list = self.storage_list
        for node in storage_list:
            node.start()
        self.tic()
        if not fast_startup:
            self._startCluster()
            self.tic()
        state = self.neoctl.getClusterState()
        assert state in (ClusterStates.RUNNING, ClusterStates.BACKINGUP), state
        self.enableStorageList(storage_list)

    def _startCluster(self):
        try:
            self.neoctl.startCluster()
        except RuntimeError:
            # The cluster may already be starting/started: only propagate
            # if it is really in an unexpected state.
            self.tic()
            if self.neoctl.getClusterState() not in (
                      ClusterStates.BACKINGUP,
                      ClusterStates.RUNNING,
                      ClusterStates.VERIFYING,
                  ):
                raise

    def enableStorageList(self, storage_list):
        """Enable the given storages and check they end up RUNNING."""
        self.neoctl.enableStorageList([x.uuid for x in storage_list])
        self.tic()
        for node in storage_list:
            assert self.getNodeState(node) == NodeStates.RUNNING

    @property
    def db(self):
        # Lazily-opened ZODB database backed by this cluster.
        try:
            return self._db
        except AttributeError:
            self._db = db = ZODB.DB(storage=self.getZODBStorage())
            return db

    def stop(self):
        """Close the client/DB and stop every node, then unpatch."""
        if hasattr(self, '_db') and self.client.em._timeout == 0:
            # The client thread must drive polling while the DB closes.
            self.client.setPoll(True)
        # Close the DB if opened, else the bare client.
        self.__dict__.pop('_db', self.client).close()
        try:
            Serialized.release(stop=
                self.admin_list + self.storage_list + self.master_list)
            for node_type in 'admin', 'storage', 'master':
                for node in getattr(self, node_type + '_list'):
                    if node.isAlive():
                        node.join()
        finally:
            Serialized.acquire()
        self._unpatch()

    @staticmethod
    def tic(force=False):
        """Let all nodes run until the whole cluster is idle."""
        # XXX: Should we automatically switch client in slave mode if it isn't ?
        f = sys._getframe(1)
        try:
            logging.info('tic (%s:%u) ...', f.f_code.co_filename, f.f_lineno)
        finally:
            del f
        if force:
            Serialized.tic()
            logging.info('forced tic')
        while Serialized.pending:
            Serialized.tic()
            logging.info('tic')

    def getNodeState(self, node):
        """Return the state of 'node' as reported by the admin node."""
        uuid = node.uuid
        for node in self.neoctl.getNodeList(node.node_type):
            if node[2] == uuid:
                return node[3]

    def getOudatedCells(self):
        # (sic) Return the list of OUT_OF_DATE cells of the partition table.
        return [cell for row in self.neoctl.getPartitionRowList()[1]
                     for cell in row[1]
                     if cell[1] == CellStates.OUT_OF_DATE]

    def getZODBStorage(self, **kw):
        # automatically put client in master mode
        if self.client.em._timeout == 0:
            self.client.setPoll(True)
        return Storage.Storage(None, self.name, _app=self.client, **kw)

    def importZODB(self, dummy_zodb=None, random=random):
        """Return a callable importing 'count' transactions of dummy data."""
        if dummy_zodb is None:
            from ..stat_zodb import PROD1
            dummy_zodb = PROD1(random)
        preindex = {}
        as_storage = dummy_zodb.as_storage
        return lambda count: self.getZODBStorage().importFrom(
            as_storage(count), preindex=preindex)

    def populate(self, transaction_list, tid=lambda i: p64(i+1),
                                         oid=lambda i: p64(i+1)):
        """Commit one transaction per list of oids in 'transaction_list'."""
        storage = self.getZODBStorage()
        tid_dict = {}
        for i, oid_list in enumerate(transaction_list):
            txn = transaction.Transaction()
            storage.tpc_begin(txn, tid(i))
            for o in oid_list:
                # Store each object with its last committed tid as serial.
                storage.store(oid(o), tid_dict.get(o), repr((i, o)), '', txn)
            storage.tpc_vote(txn)
            i = storage.tpc_finish(txn)
            for o in oid_list:
                tid_dict[o] = i

    def getTransaction(self):
        """Return a (transaction manager, ZODB connection) pair."""
        txn = transaction.TransactionManager()
        return txn, self.db.open(transaction_manager=txn)

    def __del__(self, __print_exc=traceback.print_exc):
        # print_exc is bound as a default argument so it is still
        # reachable during interpreter shutdown.
        try:
            self.neoctl.close()
            for node_type in 'admin', 'storage', 'master':
                for node in getattr(self, node_type + '_list'):
                    node.close()
            self.client.em.close()
        except:
            __print_exc()
            raise

    def extraCellSortKey(self, key):
        """Patch the client so 'key' breaks ties when sorting cells."""
        return Patch(self.client.cp, getCellSortKey=lambda orig, cell:
            (orig(cell), key(cell)))

class NEOThreadedTest(NeoTestBase):
    """Base class for tests running a whole NEO cluster in threads."""

    def setupLog(self):
        # One log file per test; the returned logger name renders as the
        # current thread's node name (see LoggerThreadName).
        log_file = os.path.join(getTempDirectory(), self.id() + '.log')
        logging.setup(log_file)
        return LoggerThreadName()

    def _tearDown(self, success):
        super(NEOThreadedTest, self)._tearDown(success)
        ServerNode.resetPorts()
        if success:
            # Shrink the log database of successful runs: packet bodies
            # are only useful for debugging failures.
            with logging as db:
                db.execute("UPDATE packet SET body=NULL")
                db.execute("VACUUM")

    def getUnpickler(self, conn):
        """Return a function unpickling object records as 'conn' would."""
        reader = conn._reader
        def unpickler(data, compression=False):
            if compression:
                data = decompress(data)
            obj = reader.getGhost(data)
            reader.setGhostState(obj, data)
            return obj
        return unpickler

    class newThread(threading.Thread):
        """Daemon thread started at creation, re-raising on join() any
        exception raised by the target function."""

        def __init__(self, func, *args, **kw):
            threading.Thread.__init__(self)
            self.__target = func, args, kw
            self.daemon = True
            self.start()

        def run(self):
            try:
                apply(*self.__target)
                self.__exc_info = None
            except:
                self.__exc_info = sys.exc_info()

        def join(self, timeout=None):
            threading.Thread.join(self, timeout)
            if not self.isAlive() and self.__exc_info:
                etype, value, tb = self.__exc_info
                del self.__exc_info
                # Python 2 three-argument raise: preserve the original
                # traceback from the thread.
                raise etype, value, tb


def predictable_random(seed=None):
    """Decorator factory making the cluster's randomness deterministic.

    The wrapped test runs with the module-level 'random' generators of the
    few NEO modules that draw random numbers replaced by a single seeded
    random.Random instance; they are restored afterwards.  The seed is
    logged so a failing run can be replayed.
    """
    # Because we have 2 running threads when client works, we can't
    # patch neo.client.pool (and cluster should have 1 storage).
    from neo.master import backup_app
    from neo.master.handlers import administration
    from neo.storage import replicator
    patched_modules = (administration, backup_app, replicator)
    def decorator(wrapped):
        def wrapper(*args, **kw):
            # Fresh seed per run unless pinned by the caller.
            if seed is None:
                s = repr(time.time())
            else:
                s = seed
            logging.info("using seed %r", s)
            rng = random.Random(s)
            try:
                for module in patched_modules:
                    module.random = rng
                return wrapped(*args, **kw)
            finally:
                for module in patched_modules:
                    module.random = random
        return wraps(wrapped)(wrapper)
    return decorator