pax_global_header 0000666 0000000 0000000 00000000064 12601037530 0014507 g ustar 00root root 0000000 0000000 52 comment=7d5b155980afbc07eed092acc92f4d841ca7265b
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/ 0000775 0000000 0000000 00000000000 12601037530 0021074 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/ 0000775 0000000 0000000 00000000000 12601037530 0021655 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0023754 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/admin/ 0000775 0000000 0000000 00000000000 12601037530 0022745 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/admin/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0025044 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/admin/app.py 0000664 0000000 0000000 00000012631 12601037530 0024102 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.app import BaseApplication
from neo.lib.connection import ListeningConnection
from neo.lib.exception import PrimaryFailure
from .handler import AdminEventHandler, MasterEventHandler, \
MasterRequestEventHandler
from neo.lib.bootstrap import BootstrapManager
from neo.lib.pt import PartitionTable
from neo.lib.protocol import ClusterStates, Errors, \
NodeTypes, NodeStates, Packets
from neo.lib.debug import register as registerLiveDebugger
class Application(BaseApplication):
    """The admin node application.

    Connects to the primary master as an ADMIN node, mirrors the cluster
    state and partition table, and serves administration requests
    (neoctl) on its own listening address.
    """

    def __init__(self, config):
        super(Application, self).__init__(config.getDynamicMasterList())
        for address in config.getMasters():
            self.nm.createMaster(address=address)
        self.name = config.getCluster()
        self.server = config.getBind()
        logging.debug('IP address is %s, port is %d', *self.server)
        # The partition table is initialized after getting the number of
        # partitions.
        self.pt = None
        self.uuid = config.getUUID()
        self.request_handler = MasterRequestEventHandler(self)
        self.master_event_handler = MasterEventHandler(self)
        self.cluster_state = None
        self.reset()
        registerLiveDebugger(on_log=self.log)

    def close(self):
        self.listening_conn = None
        super(Application, self).close()

    def reset(self):
        # Forget everything tied to the previous primary master connection.
        self.bootstrapped = False
        self.master_conn = None
        self.master_node = None

    def log(self):
        """Dump pre-mortem / debugging state of all components."""
        self.em.log()
        self.nm.log()
        if self.pt is not None:
            self.pt.log()

    def run(self):
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise

    def _run(self):
        """Make sure that the status is sane and start a loop."""
        if not self.name:
            # NOTE: fixed from Python-2-only "raise X, y" comma syntax,
            # for consistency with the other raises in this module.
            raise RuntimeError('cluster name must be non-empty')
        # Make a listening port.
        handler = AdminEventHandler(self)
        self.listening_conn = ListeningConnection(self.em, handler, self.server)
        while self.cluster_state != ClusterStates.STOPPING:
            self.connectToPrimary()
            try:
                while True:
                    self.em.poll(1)
            except PrimaryFailure:
                logging.error('primary master is down')
        self.listening_conn.close()
        # Drain remaining events before exiting.
        while not self.em.isIdle():
            self.em.poll(1)

    def connectToPrimary(self):
        """Find a primary master node, and connect to it.

        If a primary master node is not elected or ready, repeat
        the attempt of a connection periodically.

        Note that I do not accept any connection from non-master nodes
        at this stage.
        """
        self.cluster_state = None
        # search, find, connect and identify to the primary master
        bootstrap = BootstrapManager(self, self.name, NodeTypes.ADMIN,
            self.uuid, self.server)
        data = bootstrap.getPrimaryConnection()
        (node, conn, uuid, num_partitions, num_replicas) = data
        self.master_node = node
        self.master_conn = conn
        self.uuid = uuid

        if self.pt is None:
            self.pt = PartitionTable(num_partitions, num_replicas)
        elif self.pt.getPartitions() != num_partitions:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of partitions is inconsistent')
        elif self.pt.getReplicas() != num_replicas:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of replicas is inconsistent')

        # passive handler: from now on, the master drives us.
        self.master_conn.setHandler(self.master_event_handler)
        self.master_conn.ask(Packets.AskClusterState())
        self.master_conn.ask(Packets.AskNodeInformation())
        self.master_conn.ask(Packets.AskPartitionTable())

    def sendPartitionTable(self, conn, min_offset, max_offset, uuid):
        """Answer a partition-table query on *conn*.

        Rows are restricted to [min_offset, max_offset) (0 max_offset
        meaning "up to the last partition") and, when *uuid* is given,
        to cells belonging to that node.
        """
        # we have a pt
        self.pt.log()
        row_list = []
        if max_offset == 0:
            max_offset = self.pt.getPartitions()
        try:
            for offset in xrange(min_offset, max_offset):
                row = []
                try:
                    for cell in self.pt.getCellList(offset):
                        if uuid is None or cell.getUUID() == uuid:
                            row.append((cell.getUUID(), cell.getState()))
                except TypeError:
                    pass
                row_list.append((offset, row))
        except IndexError:
            conn.notify(Errors.ProtocolError('invalid partition table offset'))
        else:
            conn.answer(Packets.AnswerPartitionList(self.pt.getID(), row_list))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/admin/handler.py 0000664 0000000 0000000 00000011607 12601037530 0024741 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging, protocol
from neo.lib.handler import EventHandler
from neo.lib.protocol import uuid_str, Packets
from neo.lib.exception import PrimaryFailure
def check_primary_master(func):
    """Decorator: refuse to run *func* until the admin node is bootstrapped
    (i.e. connected to, and initialized from, a primary master)."""
    def wrapper(self, *args, **kw):
        if not self.app.bootstrapped:
            raise protocol.NotReadyError('Not connected to a primary master.')
        return func(self, *args, **kw)
    return wrapper
def forward_ask(klass):
    """Build a handler method forwarding a *klass* request to the master.

    The originating admin connection and message id are attached so the
    master's answer can be routed back to the requesting client.
    """
    def forward(self, conn, *args, **kw):
        return self.app.master_conn.ask(klass(*args, **kw),
            conn=conn, msg_id=conn.getPeerId())
    return check_primary_master(forward)
class AdminEventHandler(EventHandler):
    """Handle administration requests received on the admin node's
    listening connection."""

    @check_primary_master
    def askPartitionList(self, conn, min_offset, max_offset, uuid):
        logging.info("ask partition list from %s to %s for %s",
                     min_offset, max_offset, uuid_str(uuid))
        self.app.sendPartitionTable(conn, min_offset, max_offset, uuid)

    @check_primary_master
    def askNodeList(self, conn, node_type):
        if node_type is None:
            node_type = 'all'
            node_filter = None
        else:
            # Keep only nodes of the requested type.
            node_filter = lambda n: n.getType() is node_type
        logging.info("ask list of %s nodes", node_type)
        node_information_list = [
            node.asTuple() for node in self.app.nm.getList(node_filter)]
        conn.answer(Packets.AnswerNodeList(node_information_list))

    @check_primary_master
    def askClusterState(self, conn):
        # Answer with the last state notified by the master.
        conn.answer(Packets.AnswerClusterState(self.app.cluster_state))

    @check_primary_master
    def askPrimary(self, conn):
        conn.answer(Packets.AnswerPrimary(self.app.master_node.getUUID()))

    # Requests below are simply relayed to the primary master.
    askLastIDs = forward_ask(Packets.AskLastIDs)
    askLastTransaction = forward_ask(Packets.AskLastTransaction)
    addPendingNodes = forward_ask(Packets.AddPendingNodes)
    tweakPartitionTable = forward_ask(Packets.TweakPartitionTable)
    setClusterState = forward_ask(Packets.SetClusterState)
    setNodeState = forward_ask(Packets.SetNodeState)
    checkReplicas = forward_ask(Packets.CheckReplicas)
class MasterEventHandler(EventHandler):
    """Dispatch packets coming from the master connection to the right
    handler, and mirror cluster/partition-table notifications."""

    def _connectionLost(self, conn):
        app = self.app
        if app.listening_conn: # if running
            assert app.master_conn in (conn, None)
            conn.cancelRequests("connection to master lost")
            app.reset()
            app.uuid = None
            raise PrimaryFailure

    def connectionFailed(self, conn):
        self._connectionLost(conn)

    def connectionClosed(self, conn):
        self._connectionLost(conn)

    def dispatch(self, conn, packet, kw={}):
        if 'conn' not in kw:
            # unexpected answers and notifications
            super(MasterEventHandler, self).dispatch(conn, packet, kw)
        elif packet.isResponse():
            # expected answer: restore the original message id and
            # forward it back to the admin client that asked.
            packet.setId(kw['msg_id'])
            kw['conn'].answer(packet)
        else:
            self.app.request_handler.dispatch(conn, packet, kw)

    def answerClusterState(self, conn, state):
        # Cache the state reported by the master.
        self.app.cluster_state = state

    def answerNodeInformation(self, conn):
        # XXX: This will no more exists when the initialization module will be
        # implemented for factorize code (as done for bootstrap)
        logging.debug("answerNodeInformation")

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        # Apply incremental partition table updates.
        self.app.pt.update(ptid, cell_list, self.app.nm)

    def answerPartitionTable(self, conn, ptid, row_list):
        # Initial full partition table: this completes bootstrap.
        self.app.pt.load(ptid, row_list, self.app.nm)
        self.app.bootstrapped = True

    def sendPartitionTable(self, conn, ptid, row_list):
        # Full table pushed by the master after bootstrap.
        if self.app.bootstrapped:
            self.app.pt.load(ptid, row_list, self.app.nm)

    def notifyClusterInformation(self, conn, cluster_state):
        self.app.cluster_state = cluster_state

    def notifyNodeInformation(self, conn, node_list):
        self.app.nm.update(node_list)
class MasterRequestEventHandler(EventHandler):
    """Handle all answers from the primary master node."""
    # XXX: to be deleted ?
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/ 0000775 0000000 0000000 00000000000 12601037530 0023133 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/Storage.py 0000664 0000000 0000000 00000020056 12601037530 0025114 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from ZODB import BaseStorage, ConflictResolution, POSException
from zope.interface import implements
import ZODB.interfaces
from functools import wraps
from neo.lib import logging
from .app import Application
from .exception import NEOStorageNotFoundError, NEOStorageDoesNotExistError
def raiseReadOnlyError(*args, **kw):
    """Stub bound over mutating methods when the storage is read-only."""
    raise POSException.ReadOnlyError()
class Storage(BaseStorage.BaseStorage,
        ConflictResolution.ConflictResolvingStorage):
    """Wrapper class for neoclient.

    Implements the ZODB storage API by delegating every operation to a
    neo.client Application instance (self.app).
    """

    implements(
        ZODB.interfaces.IStorage,
        # ZODB.interfaces.IStorageRestoreable,
        ZODB.interfaces.IStorageIteration,
        ZODB.interfaces.IStorageUndoable,
        ZODB.interfaces.IExternalGC,
        ZODB.interfaces.ReadVerifyingStorage,
    )

    def __init__(self, master_nodes, name, read_only=False,
            compress=None, logfile=None, _app=None, **kw):
        """
        Do not pass those parameters (used internally):
        _app
        """
        if compress is None:
            compress = True
        if logfile:
            logging.setup(logfile)
        BaseStorage.BaseStorage.__init__(self, 'NEOStorage(%s)' % (name, ))
        # Warning: _is_read_only is used in BaseStorage, do not rename it.
        self._is_read_only = read_only
        if read_only:
            # Replace every mutating method by a stub raising ReadOnlyError.
            for method_id in (
                    'new_oid',
                    'tpc_begin',
                    'tpc_vote',
                    'tpc_abort',
                    'store',
                    'deleteObject',
                    'undo',
                    'undoLog',
                ):
                setattr(self, method_id, raiseReadOnlyError)
        if _app is None:
            _app = Application(master_nodes, name, compress=compress, **kw)
        self.app = _app

    @property
    def _cache(self):
        # Expose the client cache of the underlying Application.
        return self.app._cache

    def load(self, oid, version=''):
        # In order to know if it was safe to get the last revision of an object
        # instead of using loadBefore(), ZODB.Connection._setstate relies on
        # the fact that retrieving data from a remote storage forces incoming
        # invalidations to be received.
        # But in NEO, invalidations are not received from the same network
        # connection that the one used to retrieve data.
        # So we must implement load() like a loadBefore().
        # XXX: interface definition states that version parameter is
        # mandatory, while some ZODB tests do not provide it. For now, make
        # it optional.
        assert version == '', 'Versions are not supported'
        try:
            return self.app.load(oid)[:2]
        except NEOStorageNotFoundError:
            raise POSException.POSKeyError(oid)

    def new_oid(self):
        """Allocate and return a new packed OID."""
        return self.app.new_oid()

    def tpc_begin(self, transaction, tid=None, status=' '):
        """
        Note: never blocks in NEO.
        """
        return self.app.tpc_begin(transaction, tid, status)

    def tpc_vote(self, transaction):
        return self.app.tpc_vote(transaction, self.tryToResolveConflict)

    def tpc_abort(self, transaction):
        return self.app.tpc_abort(transaction)

    def tpc_finish(self, transaction, f=None):
        return self.app.tpc_finish(transaction, self.tryToResolveConflict, f)

    def store(self, oid, serial, data, version, transaction):
        assert version == '', 'Versions are not supported'
        return self.app.store(oid, serial, data, version, transaction)

    def deleteObject(self, oid, serial, transaction):
        # IExternalGC: storing None marks the object as deleted.
        self.app.store(oid, serial, None, None, transaction)

    # mutliple revisions
    def loadSerial(self, oid, serial):
        """Load the revision of *oid* committed exactly at *serial*."""
        try:
            return self.app.load(oid, serial)[0]
        except NEOStorageNotFoundError:
            raise POSException.POSKeyError(oid)

    def loadBefore(self, oid, tid):
        # XXX: FileStorage return an empty string for a deleted object
        #      but it may cause EOFError exceptions in ZODB.Connection
        #      and it makes impossible to store empty values.
        #      We think it's wrong behaviour and raise POSKeyError instead.
        #      Or maybe we should return None?
        try:
            return self.app.load(oid, None, tid)
        except NEOStorageDoesNotExistError:
            raise POSException.POSKeyError(oid)
        except NEOStorageNotFoundError:
            return None

    @property
    def iterator(self):
        # IStorageIteration: delegate to the application's iterator.
        return self.app.iterator

    # undo
    def undo(self, transaction_id, txn):
        return self.app.undo(transaction_id, txn, self.tryToResolveConflict)

    def undoLog(self, first=0, last=-20, filter=None):
        return self.app.undoLog(first, last, filter)

    def supportsUndo(self):
        return True

    def supportsTransactionalUndo(self):
        return True

    def loadEx(self, oid, version):
        """Legacy API: like load() but with an extra (always empty) version."""
        try:
            data, serial, _ = self.app.load(oid)
        except NEOStorageNotFoundError:
            raise POSException.POSKeyError(oid)
        return data, serial, ''

    def __len__(self):
        # Approximate object count (see getObjectCount).
        return self.app.getObjectCount()

    def registerDB(self, db, limit=None):
        self.app.registerDB(db, limit)

    def history(self, oid, *args, **kw):
        try:
            return self.app.history(oid, *args, **kw)
        except NEOStorageNotFoundError:
            raise POSException.POSKeyError(oid)

    def sync(self, force=True):
        # XXX: sync() is part of IMVCCStorage and we don't want to be called
        #      from afterCompletion() so it may not be a good place to ping the
        #      master here. See also monkey-patch in __init__.py
        self.app.lastTransaction()

    def copyTransactionsFrom(self, source, verbose=False):
        """ Zope compliant API """
        return self.importFrom(source)

    def importFrom(self, source, start=None, stop=None, preindex=None):
        """ Allow import only a part of the source storage """
        return self.app.importFrom(source, start, stop,
            self.tryToResolveConflict, preindex)

    def pack(self, t, referencesf, gc=False):
        if gc:
            logging.warning('Garbage Collection is not available in NEO,'
                ' please use an external tool. Packing without GC.')
        self.app.pack(t)

    def lastSerial(self):
        # seems unused
        raise NotImplementedError

    def lastTransaction(self):
        # Used in ZODB unit tests
        return self.app.lastTransaction()

    def _clear_temp(self):
        raise NotImplementedError

    def set_max_oid(self, possible_new_max_oid):
        # seems used only by FileStorage
        raise NotImplementedError

    def cleanup(self):
        # Used in unit tests to remove local database files.
        # We have no such thing, so make this method a no-op.
        pass

    def close(self):
        # WARNING: This does not handle the case where an app is shared by
        #          several Storage instances, but this is something that only
        #          happens in threaded tests (and this method is not called on
        #          extra Storages).
        app = self.app
        if app is not None:
            self.app = None
            app.close()

    def getTid(self, oid):
        """Return the last committed TID of *oid*, or raise KeyError."""
        try:
            return self.app.getLastTID(oid)
        except NEOStorageNotFoundError:
            raise KeyError

    def checkCurrentSerialInTransaction(self, oid, serial, transaction):
        # ReadVerifyingStorage: assert *oid* is still at *serial*.
        self.app.checkCurrentSerialInTransaction(oid, serial, transaction)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/__init__.py 0000664 0000000 0000000 00000006551 12601037530 0025253 0 ustar 00root root 0000000 0000000 ##############################################################################
#
# Copyright (C) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import app # set up signal handlers early enough to do it in the main thread

if 1:
    # Monkey-patch ZODB.Connection to cooperate with NEO's asynchronous
    # commit/invalidation model. The md5 checks on the target bytecode
    # guarantee that we only patch implementations we reviewed.
    from hashlib import md5
    from ZODB.Connection import Connection

    def _check(f, *args):
        # Abort (assert) if f's bytecode is not one of the known versions.
        h = md5(f.func_code.co_code).hexdigest()
        assert h in args, h

    # Allow serial to be returned as late as tpc_finish
    #
    # This makes possible for storage to allocate serial inside tpc_finish,
    # removing the requirement to serialise second commit phase (tpc_vote
    # to tpc_finish/tpc_abort).
    _check(Connection.tpc_finish,
        'ab9b1b8d82c40e5fffa84f7bc4ea3a8b', # Python 2.7
        )

    def tpc_finish(self, transaction):
        """Indicate confirmation that the transaction is done."""
        def callback(tid):
            if self._mvcc_storage:
                # Inter-connection invalidation is not needed when the
                # storage provides MVCC.
                return
            d = dict.fromkeys(self._modified)
            self._db.invalidate(tid, d, self)
        # It's important that the storage calls the passed function
        # while it still has its lock. We don't want another thread
        # to be able to read any updated data until we've had a chance
        # to send an invalidation message to all of the other
        # connections!
        #
        serial = self._storage.tpc_finish(transaction, callback)
        if serial is not None:
            # The storage allocated the serial during tpc_finish:
            # propagate it to all stored/created objects.
            assert isinstance(serial, str), repr(serial)
            for oid_iterator in (self._modified, self._creating):
                for oid in oid_iterator:
                    obj = self._cache.get(oid, None)
                    # Ignore missing objects and don't update ghosts.
                    if obj is not None and obj._p_changed is not None:
                        obj._p_changed = 0
                        obj._p_serial = serial
        #
        self._tpc_cleanup()
    Connection.tpc_finish = tpc_finish

    # IStorage implementations usually need to provide a "network barrier",
    # at least for NEO & ZEO, to make sure we have an up-to-date view of
    # the storage. It's unclear whether sync() is a good place to do this
    # because a round-trip to the server introduces latency and we prefer
    # it's not done when it's not useful.
    # For example, we know we are up-to-date after a successful commit,
    # so this should not be done in afterCompletion(), and anyway, we don't
    # know any legitimate use of DB access outside a transaction.
    _check(Connection.afterCompletion,
        'cd3a080b80fd957190ff3bb867149448', # Python 2.7
        )

    def afterCompletion(self, *ignored):
        self._readCurrent.clear()
        # PATCH: do not call sync()
        self._flush_invalidations()
    Connection.afterCompletion = afterCompletion
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/app.py 0000664 0000000 0000000 00000115511 12601037530 0024271 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from cPickle import dumps, loads
from zlib import compress, decompress
from random import shuffle
import heapq
import time
import weakref
from functools import partial
from ZODB.POSException import UndoError, StorageTransactionError, ConflictError
from ZODB.POSException import ReadConflictError
from ZODB.ConflictResolution import ResolvedSerial
from persistent.TimeStamp import TimeStamp
from neo.lib import logging
from neo.lib.protocol import NodeTypes, Packets, \
INVALID_PARTITION, ZERO_HASH, ZERO_TID
from neo.lib.event import EventManager
from neo.lib.util import makeChecksum, dump
from neo.lib.locking import Empty, Lock, SimpleQueue
from neo.lib.connection import MTClientConnection, ConnectionClosed
from neo.lib.node import NodeManager
from .exception import NEOStorageError, NEOStorageCreationUndoneError
from .exception import NEOStorageNotFoundError
from .handlers import storage, master
from neo.lib.dispatcher import Dispatcher, ForgottenPacket
from neo.lib.threaded_app import ThreadedApplication
from .cache import ClientCache
from .pool import ConnectionPool
from neo.lib.util import p64, u64, parseMasterList
from neo.lib.debug import register as registerLiveDebugger
# Re-export the sentinel used by tpc_vote answer handling.
CHECKED_SERIAL = master.CHECKED_SERIAL

# Optional Zope integration: reopen log files on SIGUSR2 when the
# Signals product is available (presumably a Zope 2 environment).
try:
    from Signals.Signals import SignalHandler
except ImportError:
    SignalHandler = None
if SignalHandler:
    import signal
    SignalHandler.registerHandler(signal.SIGUSR2, logging.reopen)
class TransactionContainer(dict):
    """Map id(txn) -> per-transaction context dict of an ongoing commit."""

    def pop(self, txn):
        # Forget *txn*'s context; None when it was never registered.
        return dict.pop(self, id(txn), None)

    def get(self, txn):
        # Fail loudly if tpc_begin was never called for *txn*.
        try:
            return self[id(txn)]
        except KeyError:
            raise StorageTransactionError("unknown transaction %r" % txn)

    def new(self, txn):
        """Create and register a fresh context for *txn*; refuse duplicates."""
        key = id(txn)
        if key in self:
            raise StorageTransactionError("commit of transaction %r"
                " already started" % txn)
        context = self[key] = {
            'queue': SimpleQueue(),
            'txn': txn,
            'ttid': None,
            'data_dict': {},
            'data_size': 0,
            'cache_dict': {},
            'cache_size': 0,
            'object_base_serial_dict': {},
            'object_serial_dict': {},
            'object_stored_counter_dict': {},
            'conflict_serial_dict': {},
            'resolved_conflict_serial_dict': {},
            'involved_nodes': set(),
        }
        return context
class Application(ThreadedApplication):
"""The client node application."""
    def __init__(self, master_nodes, name, compress=True, **kw):
        """Create the client application for cluster *name*.

        master_nodes: master address list (string, parsed here).
        compress: whether object data is zlib-compressed before storing.
        """
        super(Application, self).__init__(parseMasterList(master_nodes),
            name, **kw)
        # Internal Attributes common to all thread
        self._db = None
        self.cp = ConnectionPool(self)
        self.primary_master_node = None
        self.trying_master_node = None

        # no self-assigned UUID, primary master will supply us one
        self._cache = ClientCache()
        self._loading_oid = None
        self.new_oid_list = ()
        self.last_oid = '\0' * 8
        self.last_tid = None
        self.storage_event_handler = storage.StorageEventHandler(self)
        self.storage_bootstrap_handler = storage.StorageBootstrapHandler(self)
        self.storage_handler = storage.StorageAnswersHandler(self)
        self.primary_handler = master.PrimaryAnswersHandler(self)
        self.primary_bootstrap_handler = master.PrimaryBootstrapHandler(self)
        self.notifications_handler = master.PrimaryNotificationsHandler( self)
        self._txn_container = TransactionContainer()
        # Lock definition :
        # _load_lock is used to make loading and storing atomic
        lock = Lock()
        self._load_lock_acquire = lock.acquire
        self._load_lock_release = lock.release
        # _oid_lock is used in order to not call multiple oid
        # generation at the same time
        lock = Lock()
        self._oid_lock_acquire = lock.acquire
        self._oid_lock_release = lock.release
        lock = Lock()
        # _cache_lock is used for the client cache
        self._cache_lock_acquire = lock.acquire
        self._cache_lock_release = lock.release
        # _connecting_to_master_node is used to prevent simultaneous master
        # node connection attemps
        self._connecting_to_master_node = Lock()
        self.compress = compress
    def __getattr__(self, attr):
        # 'pt' (partition table) is only filled once connected to the
        # primary master: trigger that connection lazily on first access,
        # then fall back to normal attribute lookup.
        if attr == 'pt':
            self._getMasterConnection()
        return self.__getattribute__(attr)
    @property
    def txn_contexts(self):
        # Bound method returning the current transaction contexts.
        # do not iter lazily to avoid race condition
        return self._txn_container.values
    def _waitAnyMessage(self, queue, block=True):
        """
        Handle all pending packets.
        block
            If True (default), will block until at least one packet was
            received.
        """
        pending = self.dispatcher.pending
        get = queue.get
        _handlePacket = self._handlePacket
        while pending(queue):
            try:
                conn, packet, kw = get(block)
            except Empty:
                break
            if packet is None or isinstance(packet, ForgottenPacket):
                # connection was closed or some packet was forgotten
                continue
            # After the first packet, drain the rest without blocking.
            block = False
            try:
                _handlePacket(conn, packet, kw)
            except ConnectionClosed:
                pass
    def _waitAnyTransactionMessage(self, txn_context, block=True):
        """
        Just like _waitAnyMessage, but for per-transaction exchanges, rather
        than per-thread.
        """
        queue = txn_context['queue']
        # Expose the transaction context to answer handlers while polling.
        self.setHandlerData(txn_context)
        try:
            self._waitAnyMessage(queue, block=block)
        finally:
            # Don't leave access to thread context, even if a raise happens.
            self.setHandlerData(None)
    def _askStorage(self, conn, packet, **kw):
        """ Send a request to a storage node and process its answer """
        return self._ask(conn, packet, handler=self.storage_handler, **kw)
    def _askPrimary(self, packet, **kw):
        """ Send a request to the primary master and process its answer """
        # _getMasterConnection connects on demand if needed.
        return self._ask(self._getMasterConnection(), packet,
            handler=self.primary_handler, **kw)
    def _getMasterConnection(self):
        """ Connect to the primary master node on demand """
        # Double-checked locking: for performance reasons, get 'master_conn'
        # without locking first.
        result = self.master_conn
        if result is None:
            # If not connected, 'master_conn' must be tested again while we
            # have the lock, to avoid concurrent threads reconnecting.
            with self._connecting_to_master_node:
                result = self.master_conn
                if result is None:
                    # Drop OIDs from a previous primary; they may be stale.
                    self.new_oid_list = ()
                    result = self.master_conn = self._connectToPrimaryNode()
        return result
    def _connectToPrimaryNode(self):
        """
        Lookup for the current primary master node, connect and identify to
        it, and fetch the information required to be operational (node list,
        partition table, last transaction). Loops until it succeeds.
        """
        logging.debug('connecting to primary master...')
        self.start()
        index = -1
        ask = self._ask
        handler = self.primary_bootstrap_handler
        while 1:
            # Get network connection to primary master
            while 1:
                if self.primary_master_node is not None:
                    # If I know a primary master node, pinpoint it.
                    self.trying_master_node = self.primary_master_node
                    self.primary_master_node = None
                else:
                    # Otherwise, check one by one.
                    master_list = self.nm.getMasterList()
                    index = (index + 1) % len(master_list)
                    self.trying_master_node = master_list[index]
                # Connect to master
                conn = MTClientConnection(self.em,
                        self.notifications_handler,
                        node=self.trying_master_node,
                        dispatcher=self.dispatcher)
                # Query for primary master node
                if conn.getConnector() is None:
                    # This happens if a connection could not be established.
                    logging.error('Connection to master node %s failed',
                                  self.trying_master_node)
                    continue
                try:
                    ask(conn, Packets.RequestIdentification(
                            NodeTypes.CLIENT, self.uuid, None, self.name),
                        handler=handler)
                except ConnectionClosed:
                    continue
                # If we reached the primary master node, mark as connected
                # (the bootstrap handler sets primary_master_node from the
                # identification answer).
                if self.primary_master_node is not None and \
                        self.primary_master_node is self.trying_master_node:
                    break
            logging.info('Connected to %s', self.primary_master_node)
            try:
                # Request identification and required informations to be
                # operational. Might raise ConnectionClosed so that the new
                # primary can be looked-up again.
                logging.info('Initializing from master')
                ask(conn, Packets.AskNodeInformation(), handler=handler)
                ask(conn, Packets.AskPartitionTable(), handler=handler)
                ask(conn, Packets.AskLastTransaction(), handler=handler)
                if self.pt.operational():
                    break
            except ConnectionClosed:
                logging.error('Connection to %s lost', self.trying_master_node)
                self.primary_master_node = None
        logging.info("Connected and ready")
        return conn
    def registerDB(self, db, limit):
        # Remember the ZODB DB for invalidation callbacks; *limit* is unused.
        self._db = db

    def getDB(self):
        return self._db
    def new_oid(self):
        """Get a new OID."""
        # Serialized with _oid_lock so concurrent threads never pop the
        # same OID or ask the master twice at the same time.
        self._oid_lock_acquire()
        try:
            if not self.new_oid_list:
                # Get new oid list from master node
                # we manage a list of oid here to prevent
                # from asking too many time new oid one by one
                # from master node
                self._askPrimary(Packets.AskNewOIDs(100))
                if not self.new_oid_list:
                    raise NEOStorageError('new_oid failed')
            self.last_oid = oid = self.new_oid_list.pop()
            return oid
        finally:
            self._oid_lock_release()
    def getObjectCount(self):
        # return the last OID used, this is inaccurate
        return int(u64(self.last_oid))
    def load(self, oid, tid=None, before_tid=None):
        """
        Internal method which manage load, loadSerial and loadBefore.
        OID and TID (serial) parameters are expected packed.
        oid
            OID of object to get.
        tid
            If given, the exact serial at which OID is desired.
            before_tid should be None.
        before_tid
            If given, the excluded upper bound serial at which OID is desired.
            serial should be None.

        Return value: (3-tuple)
        - Object data (None if object creation was undone).
        - Serial of given data.
        - Next serial at which object exists, or None. Only set when tid
          parameter is not None.

        Exceptions:
            NEOStorageError
                technical problem
            NEOStorageNotFoundError
                object exists but no data satisfies given parameters
            NEOStorageDoesNotExistError
                object doesn't exist
            NEOStorageCreationUndoneError
                object existed, but its creation was undone

        Note that loadSerial is used during conflict resolution to load
        object's current version, which is not visible to us normaly (it was
        committed after our snapshot was taken).
        """
        # TODO:
        # - rename parameters (here? and in handlers & packet definitions)
        acquire = self._cache_lock_acquire
        release = self._cache_lock_release
        # XXX: Is it possible to avoid this giant lock ?
        #      See commit b77c946d67c9d7cc1e9ee9b15437568dee144aa4
        #      for a way to invalidate cache properly when several loads
        #      are done simultaneously.
        self._load_lock_acquire()
        try:
            # First, try the local cache (under _cache_lock).
            acquire()
            try:
                result = self._loadFromCache(oid, tid, before_tid)
                if result:
                    return result
                # Mark oid as being loaded so concurrent invalidations
                # can be detected (see _loading_invalidated below).
                self._loading_oid = oid
            finally:
                release()
            # When not bound to a ZODB Connection, load() may be the
            # first method called and last_tid may still be None.
            # This happens, for example, when opening the DB.
            if not (tid or before_tid) and self.last_tid:
                # Do not get something more recent than the last invalidation
                # we got from master.
                before_tid = p64(u64(self.last_tid) + 1)
            data, tid, next_tid, _ = self._loadFromStorage(oid, tid, before_tid)
            acquire()
            try:
                result = data, tid, (next_tid if self._loading_oid or next_tid
                                              else self._loading_invalidated)
                self._cache.store(oid, *result)
                return result
            finally:
                release()
        finally:
            self._load_lock_release()
    def _loadFromStorage(self, oid, at_tid, before_tid):
        """Ask storage nodes for the object, trying replicas until one
        answers with valid (checksum-verified) data."""
        packet = Packets.AskObject(oid, at_tid, before_tid)
        for node, conn in self.cp.iterateForObject(oid, readable=True):
            try:
                tid, next_tid, compression, checksum, data, data_tid \
                    = self._askStorage(conn, packet)
            except ConnectionClosed:
                continue

            if data or checksum != ZERO_HASH:
                if checksum != makeChecksum(data):
                    # Corrupted answer: try another replica.
                    logging.error('wrong checksum from %s for oid %s',
                              conn, dump(oid))
                    continue
                return (decompress(data) if compression else data,
                        tid, next_tid, data_tid)
            # Empty data with zero checksum means creation was undone.
            raise NEOStorageCreationUndoneError(dump(oid))
        raise NEOStorageError("storage down or corrupted data")
    def _loadFromCache(self, oid, at_tid=None, before_tid=None):
        """
        Load from local cache, return None if not found.
        """
        if at_tid:
            # '*' suffix asks the cache for this exact serial
            # (cache-internal convention).
            result = self._cache.load(oid, at_tid + '*')
            assert not result or result[1] == at_tid
            return result
        return self._cache.load(oid, before_tid)
def tpc_begin(self, transaction, tid=None, status=' '):
    """Begin a new transaction.

    transaction: ZODB transaction object, used as key of the
        per-transaction context.
    tid: optional imposed tid (e.g. when importing data); None lets
        the master allocate one.
    status: unused, kept for ZODB storage API compatibility.

    Raises NEOStorageError if the master refuses to begin.
    """
    # First get a transaction, only one is allowed at a time
    txn_context = self._txn_container.new(transaction)
    # use the given TID or request a new one to the master
    answer_ttid = self._askPrimary(Packets.AskBeginTransaction(tid))
    if answer_ttid is None:
        raise NEOStorageError('tpc_begin failed')
    # If a tid was imposed, the master must have granted exactly it.
    assert tid in (None, answer_ttid), (tid, answer_ttid)
    txn_context['ttid'] = answer_ttid
def store(self, oid, serial, data, version, transaction):
    """Store object (ZODB API); delegates to _store with the
    transaction's context."""
    logging.debug('storing oid %s serial %s', dump(oid), dump(serial))
    txn_context = self._txn_container.get(transaction)
    self._store(txn_context, oid, serial, data)
def _store(self, txn_context, oid, serial, data, data_serial=None,
        unlock=False):
    """Send a store request for one object to every node of its partition.

    data=None means an undo record: either a no-data object or, with
    data_serial set, a back-pointer to an earlier revision.
    unlock=True is used by the (currently disabled) deadlock-avoidance
    path to re-send data while asking storages to release their lock.

    Raises NEOStorageError if no node accepted the request.
    """
    ttid = txn_context['ttid']
    if data is None:
        # This is some undo: either a no-data object (undoing object
        # creation) or a back-pointer to an earlier revision (going back to
        # an older object revision).
        compressed_data = ''
        compression = 0
        checksum = ZERO_HASH
    else:
        assert data_serial is None
        size = len(data)
        if self.compress:
            compressed_data = compress(data)
            if size < len(compressed_data):
                # Compression grew the payload: send it raw instead.
                compressed_data = data
                compression = 0
            else:
                compression = 1
        else:
            compression = 0
            compressed_data = data
        checksum = makeChecksum(compressed_data)
        txn_context['data_size'] += size
    on_timeout = partial(
        self.onStoreTimeout,
        txn_context=txn_context,
        oid=oid,
    )
    # Store object in tmp cache
    txn_context['data_dict'][oid] = data
    # Store data on each node
    txn_context['object_stored_counter_dict'][oid] = {}
    txn_context['object_base_serial_dict'].setdefault(oid, serial)
    txn_context['object_serial_dict'][oid] = serial
    queue = txn_context['queue']
    involved_nodes = txn_context['involved_nodes']
    add_involved_nodes = involved_nodes.add
    packet = Packets.AskStoreObject(oid, serial, compression,
        checksum, compressed_data, data_serial, ttid, unlock)
    for node, conn in self.cp.iterateForObject(oid):
        try:
            conn.ask(packet, on_timeout=on_timeout, queue=queue)
            add_involved_nodes(node)
        except ConnectionClosed:
            continue
    if not involved_nodes:
        raise NEOStorageError("Store failed")

    # Throttle: block until enough answers arrived so that the amount
    # of data pending in this transaction stays below the cache size.
    while txn_context['data_size'] >= self._cache._max_size:
        self._waitAnyTransactionMessage(txn_context)
    self._waitAnyTransactionMessage(txn_context, False)
def onStoreTimeout(self, conn, msg_id, txn_context, oid):
    """Callback fired when a store request did not get its answer in
    time; probes the storage instead of giving up."""
    # Runs in the poll thread: never touch thread-specific state here.
    timeout_dict = txn_context.setdefault('timeout_dict', {})
    timeout_dict[oid] = msg_id
    # Ask the storage whether someone holds a lock on the object; the
    # short-timeout request also keeps the connection open.
    probe = Packets.AskHasLock(txn_context['ttid'], oid)
    conn.ask(probe, timeout=5, queue=txn_context['queue'])
def _handleConflicts(self, txn_context, tryToResolveConflict):
    """Process pending write conflicts of a transaction.

    For each conflicting oid, either resolve the conflict with
    tryToResolveConflict (ZODB's resolver) and re-store the object, or
    raise ConflictError/ReadConflictError. Returns the list of oids
    that were stored again through resolution.
    """
    result = []
    append = result.append
    # Check for conflicts
    data_dict = txn_context['data_dict']
    object_base_serial_dict = txn_context['object_base_serial_dict']
    object_serial_dict = txn_context['object_serial_dict']
    # Work on a snapshot so new conflicts reported while we resolve
    # are handled by the next call.
    conflict_serial_dict = txn_context['conflict_serial_dict'].copy()
    txn_context['conflict_serial_dict'].clear()
    resolved_conflict_serial_dict = txn_context[
        'resolved_conflict_serial_dict']
    for oid, conflict_serial_set in conflict_serial_dict.iteritems():
        conflict_serial = max(conflict_serial_set)
        serial = object_serial_dict[oid]
        # ZERO_TID marks a deadlock-avoidance notification, not a real
        # data conflict.
        if ZERO_TID in conflict_serial_set:
          if 1:
            # XXX: disable deadlock avoidance code until it is fixed
            logging.info('Deadlock avoidance on %r:%r',
                dump(oid), dump(serial))
            # 'data' parameter of ConflictError is only used to report the
            # class of the object. It doesn't matter if 'data' is None
            # because the transaction is too big.
            try:
                data = data_dict[oid]
            except KeyError:
                data = txn_context['cache_dict'][oid]
          else:
            # Storage refused us from taking object lock, to avoid a
            # possible deadlock. TID is actually used for some kind of
            # "locking priority": when a higher value has the lock,
            # this means we stored objects "too late", and we would
            # otherwise cause a deadlock.
            # To recover, we must ask storages to release locks we
            # hold (to let possibly-competing transactions acquire
            # them), and requeue our already-sent store requests.
            # XXX: currently, brute-force is implemented: we send
            # object data again.
            # WARNING: not maintained code
            logging.info('Deadlock avoidance triggered on %r:%r',
                dump(oid), dump(serial))
            for store_oid, store_data in data_dict.iteritems():
                store_serial = object_serial_dict[store_oid]
                if store_data is CHECKED_SERIAL:
                    self._checkCurrentSerialInTransaction(txn_context,
                        store_oid, store_serial)
                else:
                    if store_data is None:
                        # Some undo
                        logging.warning('Deadlock avoidance cannot reliably'
                            ' work with undo, this must be implemented.')
                        conflict_serial = ZERO_TID
                        break
                    self._store(txn_context, store_oid, store_serial,
                        store_data, unlock=True)
            else:
                # for/else: every object was re-sent without hitting
                # an undo record; nothing more to do for this oid.
                continue
        else:
            data = data_dict.pop(oid)
            if data is CHECKED_SERIAL:
                # A readCurrent check failed: this is a read conflict.
                raise ReadConflictError(oid=oid, serials=(conflict_serial,
                    serial))
            if data: # XXX: can 'data' be None ???
                txn_context['data_size'] -= len(data)
        resolved_serial_set = resolved_conflict_serial_dict.setdefault(
            oid, set())
        if resolved_serial_set and conflict_serial <= max(
                resolved_serial_set):
            # A later serial has already been resolved, skip.
            resolved_serial_set.update(conflict_serial_set)
            continue
        try:
            new_data = tryToResolveConflict(oid, conflict_serial,
                serial, data)
        except ConflictError:
            logging.info('Conflict resolution failed for '
                '%r:%r with %r', dump(oid), dump(serial),
                dump(conflict_serial))
        else:
            logging.info('Conflict resolution succeeded for '
                '%r:%r with %r', dump(oid), dump(serial),
                dump(conflict_serial))
            # Mark this conflict as resolved
            resolved_serial_set.update(conflict_serial_set)
            # Base serial changes too, as we resolved a conflict
            object_base_serial_dict[oid] = conflict_serial
            # Try to store again
            self._store(txn_context, oid, conflict_serial, new_data)
            append(oid)
            continue
        raise ConflictError(oid=oid, serials=(conflict_serial,
            serial), data=data)
    return result
def waitResponses(self, queue):
    """Block until every request of *queue* got an answer, or its
    connection was detected as closed."""
    dispatcher = self.dispatcher
    while dispatcher.pending(queue):
        self._waitAnyMessage(queue)
def waitStoreResponses(self, txn_context, tryToResolveConflict):
    """Wait for all store answers of a transaction, resolving conflicts.

    Loops until every store request is answered and no conflict
    remains, delegating to _handleConflicts as conflicts arrive.
    Returns [(oid, ResolvedSerial)] for objects stored through
    conflict resolution; raises NEOStorageError if some object could
    not be stored on any node.
    """
    result = []
    append = result.append
    resolved_oid_set = set()
    update = resolved_oid_set.update
    _handleConflicts = self._handleConflicts
    queue = txn_context['queue']
    conflict_serial_dict = txn_context['conflict_serial_dict']
    pending = self.dispatcher.pending
    _waitAnyTransactionMessage = self._waitAnyTransactionMessage
    while pending(queue) or conflict_serial_dict:
        # Note: handler data can be overwritten by _handleConflicts
        # so we must set it for each iteration.
        _waitAnyTransactionMessage(txn_context)
        if conflict_serial_dict:
            conflicts = _handleConflicts(txn_context,
                tryToResolveConflict)
            if conflicts:
                update(conflicts)

    # Check for never-stored objects, and update result for all others
    for oid, store_dict in \
            txn_context['object_stored_counter_dict'].iteritems():
        if not store_dict:
            logging.error('tpc_store failed')
            raise NEOStorageError('tpc_store failed')
        elif oid in resolved_oid_set:
            append((oid, ResolvedSerial))
    return result
def tpc_vote(self, transaction, tryToResolveConflict):
    """Store current transaction.

    Wait for all object stores to complete, then send the transaction
    metadata to every node of the ttid's partition. Returns the list
    of conflict-resolved oids from waitStoreResponses. Raises
    NEOStorageError if no storage accepted the vote or if the master
    connection was lost since tpc_begin.
    """
    txn_context = self._txn_container.get(transaction)
    result = self.waitStoreResponses(txn_context, tryToResolveConflict)

    ttid = txn_context['ttid']
    # Store data on each node
    assert not txn_context['data_dict'], txn_context
    packet = Packets.AskStoreTransaction(ttid, str(transaction.user),
        str(transaction.description), dumps(transaction._extension),
        txn_context['cache_dict'])
    add_involved_nodes = txn_context['involved_nodes'].add
    for node, conn in self.cp.iterateForObject(ttid):
        logging.debug("voting transaction %s on %s", dump(ttid),
            dump(conn.getUUID()))
        try:
            self._askStorage(conn, packet)
        except ConnectionClosed:
            continue
        add_involved_nodes(node)

    # check at least one storage node accepted
    if txn_context['involved_nodes']:
        txn_context['voted'] = None
        # We must not go further if connection to master was lost since
        # tpc_begin, to lower the probability of failing during tpc_finish.
        if 'error' in txn_context:
            raise NEOStorageError(txn_context['error'])
        return result
    logging.error('tpc_vote failed')
    raise NEOStorageError('tpc_vote failed')
def tpc_abort(self, transaction):
    """Abort current transaction.

    Notify every storage node that took part in the transaction, as
    well as the primary master, that the transaction is aborted.
    Does nothing if the transaction is unknown (tpc_begin failed or
    abort already performed). Abort is best-effort: notification
    failures are logged and ignored.
    """
    txn_context = self._txn_container.pop(transaction)
    if txn_context is None:
        return
    ttid = txn_context['ttid']
    p = Packets.AbortTransaction(ttid)
    getConnForNode = self.cp.getConnForNode
    # cancel transaction on all those nodes
    for node in txn_context['involved_nodes']:
        conn = getConnForNode(node)
        if conn is None:
            continue
        try:
            conn.notify(p)
        # Narrowed from a bare 'except:' so that SystemExit and
        # KeyboardInterrupt are not swallowed.
        except Exception:
            # A node we cannot reach will abort by itself when its
            # connection is lost.
            logging.exception('Exception in tpc_abort while notifying '
                'storage node %r of abortion, ignoring.', conn)
    conn = self.master_conn
    if conn is not None:
        conn.notify(p)
    # We don't need to flush queue, as it won't be reused by future
    # transactions (deleted on next line & indexed by transaction object
    # instance).
    self.dispatcher.forget_queue(txn_context['queue'], flush_queue=False)
def tpc_finish(self, transaction, tryToResolveConflict, f=None):
    """Finish current transaction.

    Votes first if needed, then asks the primary master to commit.
    f, if given, is invoked with the final tid while the cache is
    being updated (forwarded as 'callback' to the answer handler).
    Returns the committed tid.
    """
    txn_container = self._txn_container
    if 'voted' not in txn_container.get(transaction):
        self.tpc_vote(transaction, tryToResolveConflict)
    # Hold the load lock so that concurrent loads cannot observe a
    # half-updated cache while the commit is being applied.
    self._load_lock_acquire()
    try:
        # Call finish on master
        txn_context = txn_container.pop(transaction)
        cache_dict = txn_context['cache_dict']
        tid = self._askPrimary(Packets.AskFinishTransaction(
            txn_context['ttid'], cache_dict),
            cache_dict=cache_dict, callback=f)
        assert tid
        return tid
    finally:
        self._load_lock_release()
def undo(self, undone_tid, txn, tryToResolveConflict):
    """Undo transaction *undone_tid* as part of transaction *txn*.

    Asks storage nodes at which serial each object must be reverted,
    resolves undo conflicts with tryToResolveConflict, and stages the
    undo records via _store. Returns (None, oid_list) as expected by
    ZODB's undo machinery. Raises UndoError when the transaction
    cannot be undone (packed data, unresolvable conflict, missing
    object).
    """
    txn_context = self._txn_container.get(txn)
    txn_info, txn_ext = self._getTransactionInformation(undone_tid)
    txn_oid_list = txn_info['oids']

    # Regroup objects per partition, to ask a minimum set of storage.
    partition_oid_dict = {}
    for oid in txn_oid_list:
        partition = self.pt.getPartition(oid)
        try:
            oid_list = partition_oid_dict[partition]
        except KeyError:
            oid_list = partition_oid_dict[partition] = []
        oid_list.append(oid)

    # Ask storage the undo serial (serial at which object's previous data
    # is)
    getCellList = self.pt.getCellList
    getCellSortKey = self.cp.getCellSortKey
    getConnForCell = self.cp.getConnForCell
    queue = self._thread_container.queue
    ttid = txn_context['ttid']
    undo_object_tid_dict = {}
    snapshot_tid = p64(u64(self.last_tid) + 1)
    for partition, oid_list in partition_oid_dict.iteritems():
        cell_list = getCellList(partition, readable=True)
        # We do want to shuffle before getting one with the smallest
        # key, so that all cells with the same (smallest) key has
        # identical chance to be chosen.
        shuffle(cell_list)
        storage_conn = getConnForCell(min(cell_list, key=getCellSortKey))
        storage_conn.ask(Packets.AskObjectUndoSerial(ttid,
            snapshot_tid, undone_tid, oid_list),
            queue=queue, undo_object_tid_dict=undo_object_tid_dict)

    # Wait for all AnswerObjectUndoSerial. We might get OidNotFoundError,
    # meaning that objects in transaction's oid_list do not exist any
    # longer. This is the symptom of a pack, so forbid undoing transaction
    # when it happens.
    try:
        self.waitResponses(queue)
    except NEOStorageNotFoundError:
        self.dispatcher.forget_queue(queue)
        raise UndoError('non-undoable transaction')

    # Send undo data to all storage nodes.
    for oid in txn_oid_list:
        current_serial, undo_serial, is_current = undo_object_tid_dict[oid]
        if is_current:
            data = None
        else:
            # Serial being undone is not the latest version for this
            # object. This is an undo conflict, try to resolve it.
            try:
                # Load the latest version we are supposed to see
                data = self.load(oid, current_serial)[0]
                # Load the version we were undoing to
                undo_data = self.load(oid, undo_serial)[0]
            except NEOStorageNotFoundError:
                raise UndoError('Object not found while resolving undo '
                    'conflict')
            # Resolve conflict
            try:
                data = tryToResolveConflict(oid, current_serial,
                    undone_tid, undo_data, data)
            except ConflictError:
                raise UndoError('Some data were modified by a later ' \
                    'transaction', oid)
            # Resolved data is stored as a plain new revision, not as
            # a back-pointer.
            undo_serial = None
        self._store(txn_context, oid, current_serial, data, undo_serial)

    return None, txn_oid_list
def _insertMetadata(self, txn_info, extension):
for k, v in loads(extension).items():
txn_info[k] = v
def _getTransactionInformation(self, tid):
    """Fetch (txn_info, txn_ext) for *tid* from any readable storage node.

    Raises NEOStorageError if no reachable node knows the transaction.
    """
    packet = Packets.AskTransactionInformation(tid)
    for node, conn in self.cp.iterateForObject(tid, readable=True):
        try:
            txn_info, txn_ext = self._askStorage(conn, packet)
        except ConnectionClosed:
            continue
        except NEOStorageNotFoundError:
            # TID not found
            continue
        break
    else:
        # for/else: loop exhausted without break, i.e. no node answered.
        raise NEOStorageError('Transaction %r not found' % (tid, ))
    return (txn_info, txn_ext)
def undoLog(self, first, last, filter=None, block=0):
    """Return up to (last - first) undoable transaction descriptions.

    Asks every storage node for recent tids, then fetches metadata for
    each, newest first. 'block' is an internal flag preventing more
    than one recursive retry with a widened window.
    """
    # XXX: undoLog is broken
    if last < 0:
        # See FileStorage.py for explanation
        last = first - last

    # First get a list of transactions from all storage nodes.
    # Each storage node will return TIDs only for UP_TO_DATE state and
    # FEEDING state cells
    queue = self._thread_container.queue
    packet = Packets.AskTIDs(first, last, INVALID_PARTITION)
    tid_set = set()
    for storage_node in self.pt.getNodeSet(True):
        conn = self.cp.getConnForNode(storage_node)
        if conn is None:
            continue
        conn.ask(packet, queue=queue, tid_set=tid_set)

    # Wait for answers from all storages.
    self.waitResponses(queue)

    # Reorder tids
    ordered_tids = sorted(tid_set, reverse=True)
    logging.debug("UndoLog tids %s", map(dump, ordered_tids))
    # For each transaction, get info
    undo_info = []
    append = undo_info.append
    for tid in ordered_tids:
        (txn_info, txn_ext) = self._getTransactionInformation(tid)
        if filter is None or filter(txn_info):
            txn_info.pop('packed')
            txn_info.pop("oids")
            self._insertMetadata(txn_info, txn_ext)
            append(txn_info)
            if len(undo_info) >= last - first:
                break

    # Check we return at least one element, otherwise call
    # again but extend offset
    if len(undo_info) == 0 and not block:
        undo_info = self.undoLog(first=first, last=last*5, filter=filter,
            block=1)
    return undo_info
def transactionLog(self, start, stop, limit):
    """Return (last_tid, txn_list) for at most *limit* transactions
    whose tid lies in [start, stop], merging per-partition answers."""
    tid_list = []
    # request a tid list for each partition
    for offset in xrange(self.pt.getPartitions()):
        p = Packets.AskTIDsFrom(start, stop, limit, offset)
        for node, conn in self.cp.iterateForObject(offset, readable=True):
            try:
                r = self._askStorage(conn, p)
                break
            except ConnectionClosed:
                pass
        else:
            # No node of this partition answered.
            raise NEOStorageError('transactionLog failed')
        if r:
            # Keep the merged list sorted and truncated to 'limit';
            # once full, no need to look past the highest kept tid.
            tid_list = list(heapq.merge(tid_list, r))
            if len(tid_list) >= limit:
                del tid_list[limit:]
                stop = tid_list[-1]
    # request transactions informations
    txn_list = []
    append = txn_list.append
    tid = None
    for tid in tid_list:
        (txn_info, txn_ext) = self._getTransactionInformation(tid)
        txn_info['ext'] = loads(txn_ext)
        append(txn_info)
    return (tid, txn_list)
def history(self, oid, size=1, filter=None):
    """Return up to *size* history entries for oid, most recent first.

    NOTE(review): implicitly returns None if every storage connection
    closes before an answer arrives — confirm callers tolerate this.
    """
    # Get history informations for object first
    packet = Packets.AskObjectHistory(oid, 0, size)
    for node, conn in self.cp.iterateForObject(oid, readable=True):
        try:
            history_list = self._askStorage(conn, packet)
        except ConnectionClosed:
            continue

        # Now that we have object informations, get txn informations
        result = []
        # history_list is already sorted descending (by the storage)
        for serial, size in history_list:
            txn_info, txn_ext = self._getTransactionInformation(serial)
            # create history dict
            txn_info.pop('id')
            txn_info.pop('oids')
            txn_info.pop('packed')
            txn_info['tid'] = serial
            txn_info['version'] = ''
            txn_info['size'] = size
            if filter is None or filter(txn_info):
                result.append(txn_info)
            self._insertMetadata(txn_info, txn_ext)
        return result
def importFrom(self, source, start, stop, tryToResolveConflict,
        preindex=None):
    """Copy transactions of *source* storage in [start, stop) into NEO,
    replaying each one through tpc_begin/store/tpc_vote/tpc_finish
    with its original tid imposed."""
    # TODO: The main difference with BaseStorage implementation is that
    #       preindex can't be filled with the result 'store' (tid only
    #       known after 'tpc_finish'. This method could be dropped if we
    #       implemented IStorageRestoreable (a wrapper around source would
    #       still be required for partial import).
    if preindex is None:
        preindex = {}
    for transaction in source.iterator(start, stop):
        tid = transaction.tid
        self.tpc_begin(transaction, tid, transaction.status)
        for r in transaction:
            oid = r.oid
            # The previous tid of this oid in the import stream is the
            # serial to store against.
            pre = preindex.get(oid)
            self.store(oid, pre, r.data, r.version, transaction)
            preindex[oid] = tid
        conflicted = self.tpc_vote(transaction, tryToResolveConflict)
        assert not conflicted, conflicted
        real_tid = self.tpc_finish(transaction, tryToResolveConflict)
        assert real_tid == tid, (real_tid, tid)
from .iterator import iterator
def lastTransaction(self):
    """Return the last committed tid, refreshed from the master.

    The answer updates self.last_tid (see the notifications handler)
    before _askPrimary returns.
    """
    self._askPrimary(Packets.AskLastTransaction())
    return self.last_tid
def pack(self, t):
    """Pack the database up to time *t* (seconds since the epoch)."""
    # repr() of a TimeStamp yields its raw 8-byte representation,
    # i.e. a tid.
    tid = repr(TimeStamp(*time.gmtime(t)[:5] + (t % 60, )))
    if tid == ZERO_TID:
        raise NEOStorageError('Invalid pack time')
    self._askPrimary(Packets.AskPack(tid))
    # XXX: this is only needed to make ZODB unit tests pass.
    # It should not be otherwise required (clients should be free to load
    # old data as long as it is available in cache, even if it was pruned
    # by a pack), so don't bother invalidating on other clients.
    self._cache_lock_acquire()
    try:
        self._cache.clear()
    finally:
        self._cache_lock_release()
def getLastTID(self, oid):
    """Return the tid of the most recent revision of *oid*."""
    data, tid, next_tid = self.load(oid)
    return tid
def checkCurrentSerialInTransaction(self, oid, serial, transaction):
    """Register a readCurrent-style check of (oid, serial) within
    *transaction* (ZODB API entry point)."""
    txn_context = self._txn_container.get(transaction)
    self._checkCurrentSerialInTransaction(txn_context, oid, serial)
def _checkCurrentSerialInTransaction(self, txn_context, oid, serial):
    """Ask every storage node of oid's partition to verify that
    *serial* is still the current serial of oid (readCurrent)."""
    ttid = txn_context['ttid']
    txn_context['object_serial_dict'][oid] = serial
    # Placeholders
    queue = txn_context['queue']
    txn_context['object_stored_counter_dict'][oid] = {}
    # ZODB.Connection calls 'checkCurrentSerialInTransaction'
    # after stores, and skips oids that have been successfully stored.
    assert oid not in txn_context['cache_dict'], (oid, txn_context)
    txn_context['data_dict'].setdefault(oid, CHECKED_SERIAL)
    packet = Packets.AskCheckCurrentSerial(ttid, serial, oid)
    for node, conn in self.cp.iterateForObject(oid):
        try:
            conn.ask(packet, queue=queue)
        except ConnectionClosed:
            continue
    self._waitAnyTransactionMessage(txn_context, False)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/cache.py 0000664 0000000 0000000 00000023725 12601037530 0024561 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2011-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import math
from bisect import insort
class CacheItem(object):
    """One cached object revision, linked into an MQ cache queue.

    'prev'/'next' make the item a node of a circular doubly-linked
    list; 'level' is the index of the queue it currently lives in,
    'counter' its access frequency, 'expire' its aging deadline.
    """

    __slots__ = ('oid', 'tid', 'next_tid', 'data',
                 'counter', 'level', 'expire',
                 'prev', 'next')

    def __repr__(self):
        fragments = []
        for name in self.__slots__:
            try:
                value = getattr(self, name)
            except AttributeError:
                # Slot never assigned: leave it out of the dump.
                continue
            if not value:
                continue
            if name in ('prev', 'next'):
                # Don't follow the links: the list is circular.
                fragments.append(' %s=<...>' % name)
            elif name == 'data':
                # The pickle payload is just noise in a debug dump.
                fragments.append(" %s='...'" % name)
            else:
                fragments.append(' %s=%r' % (name, value))
        return '<%s%s>' % (self.__class__.__name__, ''.join(fragments))

    def __lt__(self, other):
        # Items of a same oid sort by tid (used by bisect.insort).
        return self.tid < other.tid
class ClientCache(object):
    """In-memory pickle cache based on Multi-Queue cache algorithm

      Multi-Queue algorithm for Second Level Buffer Caches:
      http://www.usenix.org/event/usenix01/full_papers/zhou/zhou_html/index.html

      Quick description:
      - There are multiple "regular" queues, plus a history queue
      - The queue to store an object in depends on its access frequency
      - The queue an object is in defines its lifespan (higher-index queue eq.
        longer lifespan)
        -> The more often an object is accessed, the higher lifespan it will
           have
      - Upon cache or history hit, object frequency is increased and object
        might get moved to longer-lived queue
      - Each access "ages" objects in cache, and an aging object is moved to
        shorter-lived queue as it ages without being accessed, or in the
        history queue if it's really too old.
    """

    __slots__ = ('_life_time', '_max_history_size', '_max_size',
                 '_queue_list', '_oid_dict', '_time', '_size', '_history_size')

    def __init__(self, life_time=10000, max_history_size=100000,
            max_size=20*1024*1024):
        # life_time: number of accesses before an untouched item ages.
        # max_history_size: max number of data-less items kept at level 0.
        # max_size: total byte budget for cached pickles.
        self._life_time = life_time
        self._max_history_size = max_history_size
        self._max_size = max_size
        self.clear()

    def clear(self):
        """Reset cache"""
        self._queue_list = [None] # first is history
        self._oid_dict = {}      # oid -> list of items sorted by tid
        self._time = 0           # logical clock, bumped on each fetch
        self._size = 0           # total size of cached data, in bytes
        self._history_size = 0   # number of items in the history queue

    def _iterQueue(self, level):
        """for debugging purpose"""
        if level < len(self._queue_list):
            # Queues are circular: stop when we are back at the head.
            item = head = self._queue_list[level]
            if item:
                while 1:
                    yield item
                    item = item.next
                    if item is head:
                        break

    def _add(self, item):
        """Link *item* at the tail of the queue given by item.level,
        creating the queue if needed. Entering level 0 (history) frees
        the data and may evict the oldest history item."""
        level = item.level
        try:
            head = self._queue_list[level]
        except IndexError:
            assert len(self._queue_list) == level
            self._queue_list.append(item)
            item.prev = item.next = item
        else:
            if head:
                item.prev = tail = head.prev
                tail.next = head.prev = item
                item.next = head
            else:
                self._queue_list[level] = item
                item.prev = item.next = item
        if level:
            item.expire = self._time + self._life_time
        else:
            # History queue: keep the metadata but drop the payload.
            self._size -= len(item.data)
            item.data = None
            if self._history_size < self._max_history_size:
                self._history_size += 1
            else:
                # History full: evict its head entirely.
                self._remove(head)
                item_list = self._oid_dict[head.oid]
                item_list.remove(head)
                if not item_list:
                    del self._oid_dict[head.oid]

    def _remove(self, item):
        """Unlink *item* from its queue, decrementing its level (aging),
        and return the next item of that queue (or None)."""
        level = item.level
        if level is not None:
            item.level = level - 1
            next = item.next
            if next is item:
                # It was the only item of the queue.
                self._queue_list[level] = next = None
            else:
                item.prev.next = next
                next.prev = item.prev
                if self._queue_list[level] is item:
                    self._queue_list[level] = next
            return next

    def _fetched(self, item, _log=math.log):
        """Record an access to *item*: bump its frequency, move it to
        the queue matching log2(counter), and age at most one expired
        queue head."""
        self._remove(item)
        item.counter = counter = item.counter + 1
        # XXX It might be better to adjust the level according to the object
        #     size. See commented factor for example.
        item.level = 1 + int(_log(counter, 2)
            # * (1.01 - float(len(item.data)) / self._max_size)
            )
        self._add(item)

        self._time = time = self._time + 1
        for head in self._queue_list[1:]:
            if head and head.expire < time:
                # _remove decremented head.level, so _add demotes it.
                self._remove(head)
                self._add(head)
                break

    def _load(self, oid, before_tid=None):
        """Return the item of *oid* current before *before_tid* (or the
        latest one when before_tid is None), without touching
        statistics. Returns None on miss."""
        item_list = self._oid_dict.get(oid)
        if item_list:
            if before_tid:
                for item in reversed(item_list):
                    if item.tid < before_tid:
                        next_tid = item.next_tid
                        if next_tid and next_tid < before_tid:
                            # Revision is invalidated before before_tid:
                            # the wanted one is not cached.
                            break
                        return item
            else:
                item = item_list[-1]
                if not item.next_tid:
                    return item

    def load(self, oid, before_tid=None):
        """Return a revision of oid that was current before given tid"""
        item = self._load(oid, before_tid)
        if item:
            data = item.data
            if data is not None:
                self._fetched(item)
                return data, item.tid, item.next_tid

    def store(self, oid, data, tid, next_tid):
        """Store a new data record in the cache"""
        size = len(data)
        max_size = self._max_size
        if size < max_size:
            item = self._load(oid, next_tid)
            if item:
                # Already known (possibly as a data-less history entry).
                assert item.tid == tid and item.next_tid == next_tid
                if item.level: # already stored
                    assert item.data == data
                    return
                assert not item.data
                # Revive from history: it gets real data again.
                self._history_size -= 1
            else:
                item = CacheItem()
                item.oid = oid
                item.tid = tid
                item.next_tid = next_tid
                item.counter = 0
                item.level = None
                try:
                    item_list = self._oid_dict[oid]
                except KeyError:
                    self._oid_dict[oid] = [item]
                else:
                    if next_tid:
                        insort(item_list, item)
                    else:
                        # New current revision: it inherits the access
                        # frequency of the one it supersedes.
                        prev = item_list[-1]
                        item.counter = prev.counter
                        prev.counter = 0
                        if prev.level > 1:
                            self._fetched(prev)
                        item_list.append(item)
            item.data = data
            self._fetched(item)
            self._size += size
            if max_size < self._size:
                # Over budget: demote queue heads to history (which
                # frees their data) until we fit again.
                for head in self._queue_list[1:]:
                    while head:
                        next = self._remove(head)
                        head.level = 0
                        self._add(head)
                        if self._size <= max_size:
                            return
                        head = next

    def invalidate(self, oid, tid):
        """Mark data record as being valid only up to given tid"""
        try:
            item = self._oid_dict[oid][-1]
        except KeyError:
            pass
        else:
            if item.next_tid is None:
                item.next_tid = tid
            else:
                assert item.next_tid <= tid, (item, oid, tid)

    def clear_current(self):
        """Drop every cached 'current' revision (next_tid is None) and
        return the list of affected oids."""
        oid_list = []
        for oid, item_list in self._oid_dict.items():
            item = item_list[-1]
            if item.next_tid is None:
                self._remove(item)
                del item_list[-1]
                # We don't preserve statistics of removed items. This could be
                # done easily when previous versions are cached, by copying
                # counters, but it would not be fair for other oids, so it's
                # probably not worth it.
                if not item_list:
                    del self._oid_dict[oid]
                oid_list.append(oid)
        return oid_list
def test(self):
    """Cache self-test; bound as a TestCase method by the __main__ block."""
    cache = ClientCache()
    # Misses on an empty cache, with and without an upper bound.
    self.assertEqual(cache.load(1, 10), None)
    self.assertEqual(cache.load(1, None), None)
    cache.invalidate(1, 10)
    data = '5', 5, 10
    # 2 identical stores happens if 2 threads got a cache miss at the same time
    cache.store(1, *data)
    cache.store(1, *data)
    self.assertEqual(cache.load(1, 10), data)
    self.assertEqual(cache.load(1, None), None)
    data = '15', 15, None
    cache.store(1, *data)
    self.assertEqual(cache.load(1, None), data)
    # clear_current drops the current revision and reports its oid.
    self.assertEqual(cache.clear_current(), [1])
    self.assertEqual(cache.load(1, None), None)
    cache.store(1, *data)
    cache.invalidate(1, 20)
    # Once invalidated, the revision is no longer "current".
    self.assertEqual(cache.clear_current(), [])
    self.assertEqual(cache.load(1, 20), ('15', 15, 20))
    cache.store(1, '10', 10, 15)
    cache.store(1, '20', 20, 21)
    # Revisions of an oid are kept sorted by tid.
    self.assertEqual([5, 10, 15, 20], [x.tid for x in cache._oid_dict[1]])
if __name__ == '__main__':
    # Ad-hoc self-test: bind test() as the sole method of an anonymous
    # TestCase subclass and run it.
    import unittest
    unittest.TextTestRunner().run(type('', (unittest.TestCase,), {
        'runTest': test})())
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/component.xml 0000664 0000000 0000000 00000003022 12601037530 0025654 0 ustar 00root root 0000000 0000000
A scalable storage for Zope
Give the list of master nodes, like ip:port ip:port...
Give the name of the cluster
If true, data is automatically compressed (unless compressed size is
not smaller). This is the default behaviour.
If true, only reads may be executed against the storage. Note
that the "pack" operation is not considered a write operation
and is still allowed on a read-only neostorage.
Log debugging information to specified SQLite DB.
The file designated by this option contains an updated list of master
nodes which are known to be part of current cluster, so new nodes can
be added/removed without requiring a config change each time.
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/config.py 0000664 0000000 0000000 00000001660 12601037530 0024755 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from ZODB.config import BaseConfig
class NeoStorage(BaseConfig):
    """ZConfig section handler that opens a NEO client Storage from the
    attributes of its configuration section."""

    def open(self):
        # Imported lazily so that merely loading this module does not
        # pull in the whole client stack.
        from .Storage import Storage
        config = self.config
        options = {name: getattr(config, name)
                   for name in config.getSectionAttributes()}
        return Storage(**options)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/exception.py 0000664 0000000 0000000 00000002407 12601037530 0025506 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from ZODB import POSException
class NEOStorageError(POSException.StorageError):
    """Base class of all NEO-specific storage errors."""
    pass
class NEOStorageNotFoundError(NEOStorageError):
    """Raised when a requested object or transaction cannot be found."""
    pass
class NEOStorageDoesNotExistError(NEOStorageNotFoundError):
    """
    This error is a refinement of NEOStorageNotFoundError: this means
    that some object was not found, but also that it does not exist at all.
    """
    pass
class NEOStorageCreationUndoneError(NEOStorageDoesNotExistError):
    """
    This error is a refinement of NEOStorageDoesNotExistError: this means that
    some object existed at some point, but its creation was undone.
    """
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/handlers/ 0000775 0000000 0000000 00000000000 12601037530 0024733 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/handlers/__init__.py 0000664 0000000 0000000 00000001616 12601037530 0027050 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import handler
from ZODB.POSException import StorageError
class AnswerBaseHandler(handler.AnswerBaseHandler): # XXX
    """Base class for client answer handlers: translates low-level
    protocol errors into ZODB StorageError so callers above the ZODB
    API see a standard exception type."""

    def protocolError(self, conn, message):
        raise StorageError("protocol error: %s" % message)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/handlers/master.py 0000664 0000000 0000000 00000017166 12601037530 0026613 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.handler import MTEventHandler
from neo.lib.pt import MTPartitionTable as PartitionTable
from neo.lib.protocol import NodeStates, Packets, ProtocolError
from neo.lib.util import dump, add64
from . import AnswerBaseHandler
from ..exception import NEOStorageError
CHECKED_SERIAL = object()
class PrimaryBootstrapHandler(AnswerBaseHandler):
    """ Bootstrap handler used when looking for the primary master """

    def notReady(self, conn, message):
        # The contacted master is not ready: drop it and let the
        # bootstrap logic pick another candidate.
        self.app.trying_master_node = None
        conn.close()

    def _acceptIdentification(self, node, uuid, num_partitions,
            num_replicas, your_uuid, primary, known_master_list):
        """Process a master's identification answer: register known
        masters, follow the announced primary if it is a different
        node, then record our own uuid and create the partition
        table."""
        app = self.app

        # Register new master nodes.
        found = False
        conn_address = node.getAddress()
        for node_address, node_uuid in known_master_list:
            if node_address == conn_address:
                # The peer must list itself consistently.
                assert uuid == node_uuid, (dump(uuid), dump(node_uuid))
                found = True
            n = app.nm.getByAddress(node_address)
            if n is None:
                n = app.nm.createMaster(address=node_address)
            if node_uuid is not None and n.getUUID() != node_uuid:
                n.setUUID(node_uuid)
        assert found, (node, dump(uuid), known_master_list)

        conn = node.getConnection()
        if primary is not None:
            primary_node = app.nm.getByAddress(primary)
            if primary_node is None:
                # I don't know such a node. Probably this information
                # is old. So ignore it.
                logging.warning('Unknown primary master: %s. Ignoring.',
                    primary)
                return
            else:
                if app.trying_master_node is not primary_node:
                    # We reached a secondary master: disconnect and let
                    # bootstrap retry against the announced primary.
                    app.trying_master_node = None
                    conn.close()
                app.primary_master_node = primary_node
        else:
            if app.primary_master_node is not None:
                # The primary master node is not a primary master node
                # any longer.
                app.primary_master_node = None

            app.trying_master_node = None
            conn.close()
            return

        # the master must give an UUID
        if your_uuid is None:
            raise ProtocolError('No UUID supplied')
        app.uuid = your_uuid
        logging.info('Got an UUID: %s', dump(app.uuid))

        # Always create partition table
        app.pt = PartitionTable(num_partitions, num_replicas)

    def answerPartitionTable(self, conn, ptid, row_list):
        assert row_list
        self.app.pt.load(ptid, row_list, self.app.nm)

    def answerNodeInformation(self, conn):
        # Nothing to do: node lists are handled through notifications.
        pass

    def answerLastTransaction(self, conn, ltid):
        # Ignored during bootstrap.
        pass
class PrimaryNotificationsHandler(MTEventHandler):
    """ Handler that process the notifications from the primary master """

    def packetReceived(self, conn, packet, kw={}):
        """Intercept a few answers for cache maintenance, then dispatch.

        NOTE(review): 'kw' defaults to a shared mutable dict; the pop()
        calls below assume that the relevant answer packets always come
        with an explicit 'kw' — confirm against the dispatcher.
        """
        if type(packet) is Packets.AnswerLastTransaction:
            app = self.app
            ltid = packet.decode()[0]
            if app.last_tid != ltid:
                # master_conn is None while (re)connecting: transactions may
                # have been missed, so flush current cache entries and let
                # the DB know about the whole range since last_tid.
                if app.master_conn is None:
                    app._cache_lock_acquire()
                    try:
                        oid_list = app._cache.clear_current()
                        db = app.getDB()
                        if db is not None:
                            db.invalidate(app.last_tid and
                                add64(app.last_tid, 1), oid_list)
                    finally:
                        app._cache_lock_release()
                app.last_tid = ltid
        elif type(packet) is Packets.AnswerTransactionFinished:
            app = self.app
            app.last_tid = tid = packet.decode()[1]
            callback = kw.pop('callback')
            # Update cache
            cache = app._cache
            app._cache_lock_acquire()
            try:
                for oid, data in kw.pop('cache_dict').iteritems():
                    if data is CHECKED_SERIAL:
                        # this is just a remain of
                        # checkCurrentSerialInTransaction call, ignore (no data
                        # was modified).
                        continue
                    # Update ex-latest value in cache
                    cache.invalidate(oid, tid)
                    if data is not None:
                        # Store in cache with no next_tid
                        cache.store(oid, data, tid, None)
                # Callback is invoked while the cache lock is still held,
                # mirroring the original statement order.
                if callback is not None:
                    callback(tid)
            finally:
                app._cache_lock_release()
        MTEventHandler.packetReceived(self, conn, packet, kw)

    def connectionClosed(self, conn):
        # Losing the master connection aborts every transaction in progress.
        app = self.app
        if app.master_conn is not None:
            msg = "connection to primary master node closed"
            logging.critical(msg)
            app.master_conn = None
            for txn_context in app.txn_contexts():
                txn_context['error'] = msg
        app.primary_master_node = None
        super(PrimaryNotificationsHandler, self).connectionClosed(conn)

    def stopOperation(self, conn):
        # Only logged; the client keeps running.
        logging.critical("master node ask to stop operation")

    def invalidateObjects(self, conn, tid, oid_list):
        """Invalidate the given oids at 'tid' in the cache and the DB."""
        app = self.app
        app.last_tid = tid
        app._cache_lock_acquire()
        try:
            invalidate = app._cache.invalidate
            loading = app._loading_oid
            for oid in oid_list:
                invalidate(oid, tid)
                # Tell the thread currently loading this oid that its
                # result is outdated.
                if oid == loading:
                    app._loading_oid = None
                    app._loading_invalidated = tid
            db = app.getDB()
            if db is not None:
                db.invalidate(tid, oid_list)
        finally:
            app._cache_lock_release()

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        # Ignore changes until the initial partition table is loaded.
        if self.app.pt.filled():
            self.app.pt.update(ptid, cell_list, self.app.nm)

    def notifyNodeInformation(self, conn, node_list):
        nm = self.app.nm
        nm.update(node_list)
        # XXX: 'update' automatically closes DOWN nodes. Do we really want
        #      to do the same thing for nodes in other non-running states ?
        for node_type, addr, uuid, state in node_list:
            if state != NodeStates.RUNNING:
                node = nm.getByUUID(uuid)
                if node and node.isConnected():
                    node.getConnection().close()
class PrimaryAnswersHandler(AnswerBaseHandler):
    """ Handle that process expected packets from the primary master """

    def answerBeginTransaction(self, conn, ttid):
        # Hand the allocated ttid back to the waiting thread.
        self.app.setHandlerData(ttid)

    def answerNewOIDs(self, conn, oid_list):
        # Store the allocated oids, reversed in place.
        oid_list.reverse()
        self.app.new_oid_list = oid_list

    def answerTransactionFinished(self, conn, _, tid):
        # Hand the commit tid back to the waiting thread.
        self.app.setHandlerData(tid)

    def answerPack(self, conn, status):
        # A falsy status means the master rejected the pack request.
        if status:
            return
        raise NEOStorageError('Already packing')

    def answerLastTransaction(self, conn, ltid):
        pass
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/handlers/storage.py 0000664 0000000 0000000 00000020276 12601037530 0026760 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from ZODB.TimeStamp import TimeStamp
from ZODB.POSException import ConflictError
from neo.lib import logging
from neo.lib.protocol import LockState, ZERO_TID
from neo.lib.util import dump
from neo.lib.exception import NodeNotReady
from neo.lib.handler import MTEventHandler
from . import AnswerBaseHandler
from ..exception import NEOStorageError, NEOStorageNotFoundError
from ..exception import NEOStorageDoesNotExistError
class StorageEventHandler(MTEventHandler):
    """Connection-level events for established storage node connections."""

    def _forgetNode(self, conn):
        # Drop the pooled connection matching the peer's address.
        app = self.app
        storage_node = app.nm.getByAddress(conn.getAddress())
        assert storage_node is not None
        app.cp.removeConnection(storage_node)

    def connectionLost(self, conn, new_state):
        """An established connection to a storage node was lost."""
        self._forgetNode(conn)
        self.app.dispatcher.unregister(conn)

    def connectionFailed(self, conn):
        """An outgoing connection attempt to a storage node failed."""
        # Connection to a storage node failed
        self._forgetNode(conn)
        super(StorageEventHandler, self).connectionFailed(conn)
class StorageBootstrapHandler(AnswerBaseHandler):
    """ Handler used when connecting to a storage node """

    def notReady(self, conn, message):
        # The storage refused identification: drop it and report upwards.
        conn.close()
        raise NodeNotReady(message)

    def _acceptIdentification(self, node,
            uuid, num_partitions, num_replicas, your_uuid, primary,
            master_list):
        """Sanity-check the identification answer of a storage node."""
        master_conn = self.app.master_conn
        # The storage must agree with us on who the primary master is.
        assert master_conn is None or primary == master_conn.getAddress(), (
            primary, master_conn)
        # And it must identify itself with the uuid we already know.
        known_uuid = node.getUUID()
        assert uuid == known_uuid, (uuid, known_uuid)
class StorageAnswersHandler(AnswerBaseHandler):
    """ Handle all messages related to ZODB operations """

    def answerObject(self, conn, oid, *args):
        # Hand the remaining packet fields back to the waiting thread.
        self.app.setHandlerData(args)

    def answerStoreObject(self, conn, conflicting, oid, serial):
        """Process a storage node's answer to a store request.

        Either records a conflict to be resolved later, or accounts the
        successful store and decides what data to keep for the cache
        update done at tpc_finish.
        """
        txn_context = self.app.getHandlerData()
        # serial -> set of storage uuids that accepted that serial for
        # this oid.
        object_stored_counter_dict = txn_context[
            'object_stored_counter_dict'][oid]
        if conflicting:
            # Warning: if a storage (S1) is much faster than another (S2), then
            # we may process entirely a conflict with S1 (i.e. we received the
            # answer to the store of the resolved object on S1) before we
            # receive the conflict answer from the first store on S2.
            logging.info('%r report a conflict for %r with %r',
                conn, dump(oid), dump(serial))
            # If this conflict is not already resolved, mark it for
            # resolution.
            if serial not in txn_context[
                    'resolved_conflict_serial_dict'].get(oid, ()):
                # Storages disagreeing about the same non-zero serial is
                # an unrecoverable inconsistency.
                if serial in object_stored_counter_dict and serial != ZERO_TID:
                    raise NEOStorageError('Storages %s accepted object %s'
                        ' for serial %s but %s reports a conflict for it.' % (
                        map(dump, object_stored_counter_dict[serial]),
                        dump(oid), dump(serial), dump(conn.getUUID())))
                conflict_serial_dict = txn_context['conflict_serial_dict']
                conflict_serial_dict.setdefault(oid, set()).add(serial)
        else:
            uuid_set = object_stored_counter_dict.get(serial)
            if uuid_set is None: # store to first storage node
                object_stored_counter_dict[serial] = uuid_set = set()
                try:
                    data = txn_context['data_dict'].pop(oid)
                except KeyError: # multiple undo
                    assert txn_context['cache_dict'][oid] is None, oid
                else:
                    # Only real data (str) participates in the cache-size
                    # accounting; other values are markers.
                    if type(data) is str:
                        size = len(data)
                        txn_context['data_size'] -= size
                        size += txn_context['cache_size']
                        if size < self.app._cache._max_size:
                            txn_context['cache_size'] = size
                        else:
                            # Do not cache data past cache max size, as it
                            # would just flush it on tpc_finish. This also
                            # prevents memory errors for big transactions.
                            data = None
                    txn_context['cache_dict'][oid] = data
            else: # replica
                assert oid not in txn_context['data_dict'], oid
            uuid_set.add(conn.getUUID())

    # AnswerCheckCurrentSerial carries the same payload and needs the same
    # conflict accounting as AnswerStoreObject.
    answerCheckCurrentSerial = answerStoreObject

    def answerStoreTransaction(self, conn, _):
        # Nothing to do: the caller only waits for the answer to arrive.
        pass

    def answerTIDsFrom(self, conn, tid_list):
        logging.debug('Get %u TIDs from %r', len(tid_list), conn)
        self.app.setHandlerData(tid_list)

    def answerTransactionInformation(self, conn, tid,
            user, desc, ext, packed, oid_list):
        # Repackage the transaction metadata as (ZODB-style dict, ext).
        self.app.setHandlerData(({
            'time': TimeStamp(tid).timeTime(),
            'user_name': user,
            'description': desc,
            'id': tid,
            'oids': oid_list,
            'packed': packed,
        }, ext))

    def answerObjectHistory(self, conn, _, history_list):
        # history_list is a list of tuple (serial, size)
        self.app.setHandlerData(history_list)

    def oidNotFound(self, conn, message):
        # This can happen either when :
        # - loading an object
        # - asking for history
        raise NEOStorageNotFoundError(message)

    def oidDoesNotExist(self, conn, message):
        raise NEOStorageDoesNotExistError(message)

    def tidNotFound(self, conn, message):
        # This can happen when requiring txn informations
        raise NEOStorageNotFoundError(message)

    def answerTIDs(self, conn, tid_list, tid_set):
        # Accumulate into the caller-provided set.
        tid_set.update(tid_list)

    def answerObjectUndoSerial(self, conn, object_tid_dict,
            undo_object_tid_dict):
        # Accumulate into the caller-provided dict.
        undo_object_tid_dict.update(object_tid_dict)

    def answerHasLock(self, conn, oid, status):
        """Handle the answer to AskHasLock sent after a store timed out."""
        store_msg_id = self.app.getHandlerData()['timeout_dict'].pop(oid)
        if status == LockState.GRANTED_TO_OTHER:
            # Stop expecting the timed-out store request.
            self.app.dispatcher.forget(conn, store_msg_id)
            # Object is locked by another transaction, and we have waited until
            # timeout. To avoid a deadlock, abort current transaction (we might
            # be locking objects the other transaction is waiting for).
            raise ConflictError, 'Lock wait timeout for oid %s on %r' % (
                dump(oid), conn)
        # HasLock design required that storage is multi-threaded so that
        # it can answer to AskHasLock while processing store requests.
        # This means that the 2 cases (granted to us or nobody) are legitimate,
        # either because it gave us the lock but is/was slow to store our data,
        # or because the storage took a lot of time processing a previous
        # store (and did not even considered our lock request).
        # XXX: But storage nodes are still mono-threaded, so they should
        #      only answer with GRANTED_TO_OTHER (if they reply!), except
        #      maybe in very rare cases of race condition. Only log for now.
        #      This also means that most of the time, if the storage is slow
        #      to process some store requests, HasLock will timeout in turn
        #      and the connector will be closed.
        #      Anyway, it's not clear that HasLock requests are useful.
        #      Are store requests potentially long to process ? If not,
        #      we should simply raise a ConflictError on store timeout.
        logging.info('Store of oid %s delayed (storage overload ?)', dump(oid))

    def alreadyPendingError(self, conn, message):
        # The request is already registered on the storage: nothing to do.
        pass
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/iterator.py 0000664 0000000 0000000 00000005120 12601037530 0025334 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from ZODB import BaseStorage
from neo.lib.protocol import ZERO_TID, MAX_TID
from neo.lib.util import u64, add64
from .exception import NEOStorageCreationUndoneError, NEOStorageNotFoundError
# Number of transactions fetched per app.transactionLog() call in iterator().
CHUNK_LENGTH = 100
class Record(BaseStorage.DataRecord):
    """ BaseStorage Transaction record yielded by the Transaction object """

    def __str__(self):
        # Render oid/tid as integers for readability.
        return 'Record %s:%s: %s (%s)' % (
            u64(self.oid), u64(self.tid), len(self.data), self.data_txn)
class Transaction(BaseStorage.TransactionRecord):
    """ Transaction object yielded by the NEO iterator """

    def __init__(self, app, txn):
        # Status is hard-coded to ' ' for transactions read from NEO.
        super(Transaction, self).__init__(txn['id'], ' ',
            txn['user_name'], txn['description'], txn['ext'])
        self.app = app
        self.oid_list = txn['oids']

    def __iter__(self):
        """Yield a Record for each object touched by this transaction."""
        tid = self.tid
        load = self.app._loadFromStorage
        for oid in self.oid_list:
            try:
                data, _, _, data_tid = load(oid, tid, None)
            except NEOStorageCreationUndoneError:
                data = data_tid = None
            except NEOStorageNotFoundError:
                # Transactions are not updated after a pack, so their object
                # will not be found in the database. Skip them.
                continue
            yield Record(oid, tid, data, data_tid)

    def __str__(self):
        return 'Transaction #%s: %s %s' % (
            u64(self.tid), self.user, self.status)
def iterator(app, start=None, stop=None):
    """NEO transaction iterator

    Yield a Transaction object for every transaction committed between
    'start' and 'stop' (inclusive tids), fetched from the cluster in
    chunks of CHUNK_LENGTH.
    """
    next_tid = ZERO_TID if start is None else start
    # Never iterate past the last committed transaction.
    stop = min(stop or MAX_TID, app.lastTransaction())
    while True:
        max_tid, txn_list = app.transactionLog(next_tid, stop, CHUNK_LENGTH)
        if not txn_list:
            # Nothing more in the requested range.
            return
        for txn in txn_list:
            yield Transaction(app, txn)
        # Resume just after the last tid of this chunk.
        next_tid = add64(max_tid, 1)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/client/pool.py 0000664 0000000 0000000 00000014366 12601037530 0024470 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import time
from random import shuffle
from neo.lib import logging
from neo.lib.locking import Lock
from neo.lib.protocol import NodeTypes, Packets
from neo.lib.connection import MTClientConnection, ConnectionClosed
from neo.lib.exception import NodeNotReady
from .exception import NEOStorageError
# How long before we might retry a connection to a node to which connection
# failed in the past.
MAX_FAILURE_AGE = 600
# Cell list sort keys
# We are connected to storage node hosting cell, high priority
CELL_CONNECTED = -1
# normal priority
CELL_GOOD = 0
# Storage node hosting cell failed recently, low priority
CELL_FAILED = 1
class ConnectionPool(object):
    """This class manages a pool of connections to storage nodes."""

    def __init__(self, app, max_pool_size = 25):
        # max_pool_size is a soft limit: exceeding it only triggers a
        # cleanup pass in getConnForNode.
        self.app = app
        self.max_pool_size = max_pool_size
        # uuid -> MTClientConnection of pooled connections.
        self.connection_dict = {}
        # Define a lock in order to create one connection to
        # a storage node at a time to avoid multiple connections
        # to the same node.
        self._lock = Lock()
        # uuid -> timestamp until which the node is considered "failed"
        # (see notifyFailure/getCellSortKey).
        self.node_failure_dict = {}

    def _initNodeConnection(self, node):
        """Init a connection to a given storage node.

        Returns the connection on success, None on failure (in which
        case the node is marked as recently failed).
        """
        app = self.app
        logging.debug('trying to connect to %s - %s', node, node.getState())
        conn = MTClientConnection(app.em, app.storage_event_handler, node,
            dispatcher=app.dispatcher)
        p = Packets.RequestIdentification(NodeTypes.CLIENT,
            app.uuid, None, app.name)
        try:
            app._ask(conn, p, handler=app.storage_bootstrap_handler)
        except ConnectionClosed:
            logging.error('Connection to %r failed', node)
        except NodeNotReady:
            logging.info('%r not ready', node)
        else:
            logging.info('Connected %r', node)
            return conn
        # Reached only on failure: penalise this node for a while.
        self.notifyFailure(node)

    def _dropConnections(self):
        """Drop connections.

        Closes idle connections (no pending output, no expected answer)
        until the pool is back under max_pool_size.  Under Python 2,
        .values() returns a list copy, so deleting entries while
        iterating is safe.
        """
        for conn in self.connection_dict.values():
            # Drop first connection which looks not used
            with conn.lock:
                if not conn.pending() and \
                        not self.app.dispatcher.registered(conn):
                    del self.connection_dict[conn.getUUID()]
                    conn.setReconnectionNoDelay()
                    conn.close()
                    logging.debug('_dropConnections: connection to '
                        'storage node %s:%d closed', *conn.getAddress())
                    if len(self.connection_dict) <= self.max_pool_size:
                        break

    def notifyFailure(self, node):
        # Remember until when this node should be deprioritised.
        self.node_failure_dict[node.getUUID()] = time.time() + MAX_FAILURE_AGE

    def getCellSortKey(self, cell):
        """Return a priority key for a cell: connected < good < failed."""
        uuid = cell.getUUID()
        if uuid in self.connection_dict:
            return CELL_CONNECTED
        failure = self.node_failure_dict.get(uuid)
        # An expired penalty (timestamp in the past) counts as good.
        if failure is None or failure < time.time():
            return CELL_GOOD
        return CELL_FAILED

    def getConnForCell(self, cell):
        return self.getConnForNode(cell.getNode())

    def iterateForObject(self, object_id, readable=False):
        """ Iterate over nodes managing an object """
        pt = self.app.pt
        # An oid (str) is mapped to its partition number first.
        if type(object_id) is str:
            object_id = pt.getPartition(object_id)
        cell_list = pt.getCellList(object_id, readable)
        if not cell_list:
            raise NEOStorageError('no storage available')
        getConnForNode = self.getConnForNode
        while 1:
            new_cell_list = []
            # Shuffle to randomise node to access...
            shuffle(cell_list)
            # ...and sort with non-unique keys, to prioritise ranges of
            # randomised entries.
            cell_list.sort(key=self.getCellSortKey)
            for cell in cell_list:
                node = cell.getNode()
                conn = getConnForNode(node)
                if conn is not None:
                    yield node, conn
                # Re-check if node is running, as our knowledge of its
                # state can have changed during connection attempt.
                elif node.isRunning():
                    new_cell_list.append(cell)
            # Stop when every candidate yielded/failed for good, or when
            # the master connection is gone (cluster state unknown).
            if not new_cell_list or self.app.master_conn is None:
                break
            cell_list = new_cell_list

    def getConnForNode(self, node):
        """Return a locked connection object to a given node
        If no connection exists, create a new one"""
        if node.isRunning():
            uuid = node.getUUID()
            try:
                # Already connected to node
                return self.connection_dict[uuid]
            except KeyError:
                with self._lock:
                    # Second lookup, if another thread initiated connection
                    # while we were waiting for connection lock.
                    try:
                        return self.connection_dict[uuid]
                    except KeyError:
                        if len(self.connection_dict) > self.max_pool_size:
                            # must drop some unused connections
                            self._dropConnections()
                        # Create new connection to node
                        conn = self._initNodeConnection(node)
                        if conn is not None:
                            self.connection_dict[uuid] = conn
                            return conn
        # Implicitly returns None when the node is not running or the
        # connection attempt failed.

    def removeConnection(self, node):
        """Explicitly remove connection when a node is broken."""
        self.connection_dict.pop(node.getUUID(), None)

    def flush(self):
        """Remove all connections"""
        self.connection_dict.clear()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/debug.py 0000664 0000000 0000000 00000005266 12601037530 0023326 0 ustar 00root root 0000000 0000000 """Example of script starting a debugger on RTMIN+3 signal
The pdb is launched in a separate thread in order not to trigger timeouts.
The prompt is accessible through network in case that the process is daemonized:
$ socat READLINE TCP:127.0.0.1:54930
> neo/debug.py(63)pdb()
-> app # this is Application instance
(Pdb) app
"""
IF = 'pdb'
if IF == 'pdb':
import socket, sys, threading
from neo.lib.debug import getPdb
#from pdb import Pdb as getPdb
class Socket(object):
def __init__(self, socket):
self._socket = socket
self._buf = ''
def write(self, data):
self._socket.send(data)
def readline(self):
recv = self._socket.recv
data = self._buf
while True:
i = 1 + data.find('\n')
if i:
self._buf = data[i:]
return data[:i]
d = recv(4096)
data += d
if not d:
self._buf = ''
return data
def flush(self):
pass
def closed(self):
self._socket.setblocking(0)
try:
self._socket.recv(0)
return True
except socket.error, (err, _):
if err != errno.EAGAIN:
raise
self._socket.setblocking(1)
return False
def pdb(app_set):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1', 0))
s.listen(0)
print 'Listening to %u' % s.getsockname()[1]
_socket = Socket(s.accept()[0])
finally:
s.close()
try:
app, = app_set
except ValueError:
app = None
getPdb(stdin=_socket, stdout=_socket).set_trace()
app # this is Application instance (see 'app_set' if there are several)
try:
app_set = sys.modules['neo.client.app'].app_set
except KeyError:
f = sys._getframe(3)
try:
while f.f_code.co_name != 'run' or \
f.f_locals.get('self').__class__.__name__ != 'Application':
f = f.f_back
app_set = f.f_locals['self'],
except AttributeError:
app_set = ()
finally:
del f
threading.Thread(target=pdb, args=(app_set,)).start()
elif IF == 'frames':
import sys, traceback
write = sys.stderr.write
for thread_id, frame in sys._current_frames().iteritems():
write("Thread %s:\n" % thread_id)
traceback.print_stack(frame)
write("End of dump\n")
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/ 0000775 0000000 0000000 00000000000 12601037530 0022423 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/__init__.py 0000664 0000000 0000000 00000001300 12601037530 0024526 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from .logger import logging
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/app.py 0000664 0000000 0000000 00000002404 12601037530 0023555 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from .event import EventManager
from .node import NodeManager
class BaseApplication(object):
    """Common base for NEO node applications.

    Owns the managers every node needs: an EventManager (self.em) and a
    NodeManager (self.nm).
    """

    def __init__(self, dynamic_master_list=None):
        # dynamic_master_list is forwarded to NodeManager; presumably a
        # path used to persist the known master list — confirm there.
        self._handlers = {}
        self.em = EventManager()
        self.nm = NodeManager(dynamic_master_list)

    # XXX: Do not implement __del__ unless all references to the Application
    #      become weak.
    #      Due to cyclic references, Python < 3.4 would never call it unless
    #      it's closed explicitly, and in this case, there's nothing to do.

    def close(self):
        # Shut both managers down, then drop every attribute to break
        # reference cycles (see the note above about __del__).
        self.nm.close()
        self.em.close()
        self.__dict__.clear()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/attributeTracker.py 0000664 0000000 0000000 00000003673 12601037530 0026325 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
ATTRIBUTE_TRACKER_ENABLED = False
from .locking import LockUser
"""
Usage example:
from neo import attributeTracker
class Foo(object):
...
def assertBar(self, expected_value):
if self.bar_attr != expected_value:
attributeTracker.whoSet(self, 'bar_attr')
attributeTracker.track(Foo)
"""
MODIFICATION_CONTAINER_ID = '_attribute_tracker_dict'
def tracker_setattr(self, attr, value, setattr):
    """Record who set 'attr' on 'self', then delegate the actual write.

    'setattr' is the original (untracked) __setattr__ of the class; it is
    also used to install the tracking dict itself, so that installation
    is not recorded.
    """
    tracker = getattr(self, MODIFICATION_CONTAINER_ID, None)
    if tracker is None:
        tracker = {}
        setattr(self, MODIFICATION_CONTAINER_ID, tracker)
    # LockUser records the current stack (see whoSet's formatStack call).
    tracker[attr] = LockUser()
    setattr(self, attr, value)
if ATTRIBUTE_TRACKER_ENABLED:
    def track(klass):
        """Wrap klass.__setattr__ so every attribute write is recorded."""
        untracked_setattr = klass.__setattr__
        def klass_tracker_setattr(self, attr, value):
            tracker_setattr(self, attr, value, untracked_setattr)
        klass.__setattr__ = klass_tracker_setattr
else:
    def track(klass):
        """Tracking disabled: leave klass untouched."""
        pass
def whoSet(instance, attr):
    """Return a formatted stack of the last recorded writer of
    instance.attr, or None when no write was ever tracked."""
    container = getattr(instance, MODIFICATION_CONTAINER_ID, None)
    if container is None:
        return None
    record = container.get(attr)
    if record is None:
        return None
    return record.formatStack()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/bootstrap.py 0000664 0000000 0000000 00000012315 12601037530 0025014 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from . import logging
from .handler import EventHandler
from .protocol import uuid_str, Packets
from .connection import ClientConnection
class BootstrapManager(EventHandler):
    """
    Manage the bootstrap stage, lookup for the primary master then connect to it
    """
    # Becomes True once the primary master accepted our identification.
    accepted = False

    def __init__(self, app, name, node_type, uuid=None, server=None):
        """
        Manage the bootstrap stage of a non-master node, it lookup for the
        primary master node, connect to it then returns when the master node
        is ready.
        """
        # Fix: 'app' was received but never stored, although several methods
        # below rely on self.app (e.g. _acceptIdentification and
        # getPrimaryConnection access self.app.nm / self.app.em).
        self.app = app
        self.primary = None
        self.server = server
        self.node_type = node_type
        self.uuid = uuid
        self.name = name
        self.num_replicas = None
        self.num_partitions = None
        # Master node currently being tried.
        self.current = None

    def notifyNodeInformation(self, conn, node_list):
        # Ignored during bootstrap.
        pass

    def announcePrimary(self, conn):
        # We found the primary master early enough to be notified of election
        # end. Lucky. Anyway, we must carry on with identification request, so
        # nothing to do here.
        pass

    def connectionCompleted(self, conn):
        """
        Triggered when the network connection is successful.
        Now ask who's the primary.
        """
        EventHandler.connectionCompleted(self, conn)
        self.current.setRunning()
        conn.ask(Packets.RequestIdentification(self.node_type, self.uuid,
            self.server, self.name))

    def connectionFailed(self, conn):
        """
        Triggered when the network connection failed.
        Restart bootstrap.
        """
        EventHandler.connectionFailed(self, conn)
        self.current = None

    def connectionLost(self, conn, new_state):
        """
        Triggered when an established network connection is lost.
        Restart bootstrap.
        """
        self.current.setTemporarilyDown()
        self.current = None

    def notReady(self, conn, message):
        """
        The primary master send this message when it is still not ready to
        handle the client node.
        Close connection and restart.
        """
        conn.close()

    def _acceptIdentification(self, node, uuid, num_partitions,
            num_replicas, your_uuid, primary, known_master_list):
        """Process the identification answer of a candidate master."""
        nm = self.app.nm

        # Register new master nodes.  (Loop variable renamed so it no
        # longer shadows the 'uuid' parameter.)
        for address, node_uuid in known_master_list:
            master_node = nm.getByAddress(address)
            if master_node is None:
                master_node = nm.createMaster(address=address)
            master_node.setUUID(node_uuid)

        self.primary = nm.getByAddress(primary)
        if self.primary is None or self.current is not self.primary:
            # three cases here:
            # - something goes wrong (unknown UUID)
            # - this master doesn't know who's the primary
            # - got the primary's uuid, so cut here
            node.getConnection().close()
            return

        logging.info('connected to a primary master node')
        self.num_partitions = num_partitions
        self.num_replicas = num_replicas
        if self.uuid != your_uuid:
            # got an uuid from the primary master
            self.uuid = your_uuid
            logging.info('Got a new UUID: %s', uuid_str(self.uuid))
        self.accepted = True

    def getPrimaryConnection(self):
        """
        Primary lookup/connection process.
        Returns when the connection is made.
        """
        logging.info('connecting to a primary master node')
        em, nm = self.app.em, self.app.nm
        index = 0
        self.current = None
        conn = None
        # retry until identified to the primary
        while not self.accepted:
            if self.current is None:
                # conn closed
                conn = None
                # select a master
                master_list = nm.getMasterList()
                index = (index + 1) % len(master_list)
                self.current = master_list[index]
            if conn is None:
                # open the connection
                conn = ClientConnection(em, self, self.current)
                # Yes, the connection may be already closed. This happens when
                # the kernel reacts so quickly to a closed port that 'connect'
                # fails on the first call. In such case, poll(1) would deadlock
                # if there's no other connection to timeout.
                if conn.isClosed():
                    continue
            # still processing
            em.poll(1)
        return (self.current, conn, self.uuid, self.num_partitions,
                self.num_replicas)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/config.py 0000664 0000000 0000000 00000006502 12601037530 0024245 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from ConfigParser import SafeConfigParser, NoOptionError
from . import util
from .util import parseNodeAddress
class ConfigurationManager(object):
    """
    Configuration manager that load options from a configuration file and
    command line arguments
    """

    def __init__(self, defaults, config_file, section, argument_list):
        """
        defaults: dict of default option values
        config_file: path to an ini-style file, or None to skip file parsing
        section: section of the configuration file to read from
        argument_list: dict of command-line options (highest precedence)
        """
        self.defaults = defaults
        self.argument_list = argument_list
        self.parser = None
        if config_file is not None:
            self.parser = SafeConfigParser(defaults)
            self.parser.read(config_file)
        self.section = section

    def __get(self, key, optional=False):
        """Lookup 'key': command line first, then config file, then defaults.

        Raises RuntimeError when the option is missing and not optional.
        """
        value = self.argument_list.get(key)
        if value is None:
            if self.parser is None:
                value = self.defaults.get(key)
            else:
                try:
                    value = self.parser.get(self.section, key)
                except NoOptionError:
                    pass
        if value is None and not optional:
            # Fixed: the message used to end with a stray quote
            # ("... is undefined'").
            raise RuntimeError("Option '%s' is undefined" % (key, ))
        return value

    def getMasters(self):
        """ Get the master node list except itself """
        masters = self.__get('masters')
        # load master node list except itself
        return util.parseMasterList(masters, except_node=self.getBind())

    def getBind(self):
        """ Get the address to bind to """
        bind = self.__get('bind')
        return parseNodeAddress(bind, 0)

    def getDatabase(self):
        return self.__get('database')

    def getEngine(self):
        return self.__get('engine', True)

    def getWait(self):
        # BUG: fetched as mandatory, so a configuration without 'wait'
        # raises RuntimeError; presumably it should be optional like
        # 'engine' — confirm before changing behaviour.
        return self.__get('wait')

    def getDynamicMasterList(self):
        return self.__get('dynamic_master_list', optional=True)

    def getAdapter(self):
        return self.__get('adapter')

    def getCluster(self):
        cluster = self.__get('cluster')
        assert cluster != '', "Cluster name must be non-empty"
        return cluster

    def getReplicas(self):
        return int(self.__get('replicas'))

    def getPartitions(self):
        return int(self.__get('partitions'))

    def getReset(self):
        # only from command line
        return self.argument_list.get('reset', False)

    def getUUID(self):
        # only from command line; returns None when unset or empty
        uuid = self.argument_list.get('uuid', None)
        if uuid:
            return int(uuid)

    def getUpstreamCluster(self):
        return self.__get('upstream_cluster', True)

    def getUpstreamMasters(self):
        return util.parseMasterList(self.__get('upstream_masters'))

    def getAutostart(self):
        n = self.__get('autostart', True)
        if n:
            return int(n)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/connection.py 0000664 0000000 0000000 00000063252 12601037530 0025144 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from functools import wraps
from time import time
from . import attributeTracker, logging
from .connector import ConnectorException, ConnectorTryAgainException, \
ConnectorInProgressException, ConnectorConnectionRefusedException, \
ConnectorConnectionClosedException, ConnectorDelayedConnection
from .locking import RLock
from .protocol import uuid_str, Errors, \
PacketMalformedError, Packets, ParserState
from .util import ReadBuffer
# Default timeout (in seconds) for requests sent with Connection.ask.
CRITICAL_TIMEOUT = 30
class ConnectionClosed(Exception):
    """Raised when an operation is attempted on an already-closed connection."""
def not_closed(func):
    """Decorator: refuse to run *func* once the connection lost its connector."""
    @wraps(func)
    def guarded(self, *args, **kw):
        if self.connector is None:
            raise ConnectorConnectionClosedException
        return func(self, *args, **kw)
    return guarded
class HandlerSwitcher(object):
    """Per-connection stack of packet handlers and their pending requests.

    _pending is a list of [request_dict, handler] pairs; request_dict maps
    an emitted request id to (answer_class, timeout, on_timeout, kw).  A
    newly set handler only becomes active once every answer expected by the
    handlers queued before it has been received.
    """
    _is_handling = False
    _next_timeout = None
    _next_timeout_msg_id = None
    _next_on_timeout = None
    # class-level fallback (a 1-tuple) so attribute access keeps working
    # after close() cleared the instance __dict__
    _pending = ({}, None),

    def __init__(self, handler):
        # pending handlers and related requests
        self._pending = []
        self.setHandler(handler)

    def close(self):
        # forget all state; the class attributes above take over
        self.__dict__.clear()

    def isPending(self):
        # True while the active handler still expects at least one answer
        return bool(self._pending[0][0])

    def cancelRequests(self, conn, message):
        # Answer every pending request with a ProtocolError, unwinding the
        # handler queue down to the last registered handler.
        if self.isPending():
            p = Errors.ProtocolError(message)
            while True:
                request_dict, handler = self._pending[0]
                while request_dict:
                    msg_id, request = request_dict.popitem()
                    p.setId(msg_id)
                    # request[3] is the kw dict registered by emit()
                    handler.packetReceived(conn, p, request[3])
                if len(self._pending) == 1:
                    break
                del self._pending[0]

    def getHandler(self):
        # currently active handler
        return self._pending[0][1]

    def getLastHandler(self):
        """ Return the last (may be unapplied) handler registered """
        return self._pending[-1][1]

    def emit(self, request, timeout, on_timeout, kw={}):
        # register the request in the current handler
        # NOTE: the mutable default for kw is only stored, never mutated here
        _pending = self._pending
        if self._is_handling:
            # If this is called while handling a packet, the response is to
            # be expected for the current handler...
            (request_dict, _) = _pending[0]
        else:
            # ...otherwise, queue for the latest handler
            assert len(_pending) == 1 or _pending[0][0]
            (request_dict, _) = _pending[-1]
        msg_id = request.getId()
        answer_class = request.getAnswerClass()
        assert answer_class is not None, "Not a request"
        assert msg_id not in request_dict, "Packet id already expected"
        next_timeout = self._next_timeout
        if next_timeout is None or timeout < next_timeout:
            # this request now has the closest deadline
            self._next_timeout = timeout
            self._next_timeout_msg_id = msg_id
            self._next_on_timeout = on_timeout
        request_dict[msg_id] = answer_class, timeout, on_timeout, kw

    def getNextTimeout(self):
        return self._next_timeout

    def timeout(self, connection):
        # Called when the closest request deadline expired.  Returns the
        # timed-out msg_id (caller closes the connection) or None to keep
        # the connection open.
        msg_id = self._next_timeout_msg_id
        if self._next_on_timeout is not None:
            self._next_on_timeout(connection, msg_id)
            if self._next_timeout_msg_id != msg_id:
                # on_timeout sent a packet with a smaller timeout
                # so keep the connection open
                return
        # Notify that a timeout occurred
        return msg_id

    def handle(self, connection, packet):
        # guard against reentrant handling
        assert not self._is_handling
        self._is_handling = True
        try:
            self._handle(connection, packet)
        finally:
            self._is_handling = False

    def _handle(self, connection, packet):
        assert len(self._pending) == 1 or self._pending[0][0]
        logging.packet(connection, packet, False)
        if connection.isClosed() and packet.ignoreOnClosedConnection():
            logging.debug('Ignoring packet %r on closed connection %r',
                packet, connection)
            return
        msg_id = packet.getId()
        (request_dict, handler) = self._pending[0]
        # notifications are not expected
        if not packet.isResponse():
            handler.packetReceived(connection, packet)
            return
        # checkout the expected answer class
        try:
            klass, _, _, kw = request_dict.pop(msg_id)
        except KeyError:
            klass = None
            kw = {}
        if klass and isinstance(packet, klass) or packet.isError():
            handler.packetReceived(connection, packet, kw)
        else:
            logging.error('Unexpected answer %r in %r', packet, connection)
            if not connection.isClosed():
                notification = Packets.Notify('Unexpected answer: %r' % packet)
                connection.notify(notification)
                connection.abort()
            # handler.peerBroken(connection)
        # apply a pending handler if no more answers are pending
        while len(self._pending) > 1 and not self._pending[0][0]:
            del self._pending[0]
            logging.debug('Apply handler %r on %r', self._pending[0][1],
                connection)
        if msg_id == self._next_timeout_msg_id:
            self._updateNextTimeout()

    def _updateNextTimeout(self):
        # Find next timeout and its msg_id
        next_timeout = None
        for pending in self._pending:
            for msg_id, (_, timeout, on_timeout, _) in pending[0].iteritems():
                if not next_timeout or timeout < next_timeout[0]:
                    next_timeout = timeout, msg_id, on_timeout
        self._next_timeout, self._next_timeout_msg_id, self._next_on_timeout = \
            next_timeout or (None, None, None)

    def setHandler(self, handler):
        # Returns True when the handler was applied immediately, False when
        # it was queued behind pending answers.
        can_apply = len(self._pending) == 1 and not self._pending[0][0]
        if can_apply:
            # nothing is pending, change immediately
            self._pending[0][1] = handler
        else:
            # put the next handler in queue
            self._pending.append([{}, handler])
        return can_apply
class BaseConnection(object):
    """A base connection

    About timeouts:

        Timeouts are mainly per-connection instead of per-packet.
        The idea is that most of the time, packets are received and
        processed sequentially, so if it takes a long time for a peer to
        process a packet, following packets would just be enqueued.
        What really matters is that the peer makes progress in its work.
        As long as we receive an answer, we consider it's still alive and
        it may just have started to process the following request. So we
        reset timeouts.
        There is anyway nothing more we could do, because processing of a
        packet may be delayed in a very unpredictable way depending on
        previously received packets on peer side.
        Even ourself may be slow to receive a packet. We must not timeout
        for an answer that is already in our incoming buffer (read_buf or
        _queue).
        Timeouts in HandlerSwitcher are only there to prioritize some
        packets.
    """
    from .connector import SocketConnector as ConnectorClass
    # inactivity delay (seconds) before pinging the peer
    KEEP_ALIVE = 60

    def __init__(self, event_manager, handler, connector, addr=None):
        assert connector is not None, "Need a low-level connector"
        self.em = event_manager
        self.connector = connector
        self.addr = addr
        self._handlers = HandlerSwitcher(handler)

    # XXX: do not use getHandler
    # These properties return the bound methods of the HandlerSwitcher,
    # so `conn.getHandler()` still looks like a method call.
    getHandler = property(lambda self: self._handlers.getHandler)
    getLastHandler = property(lambda self: self._handlers.getLastHandler)
    isPending = property(lambda self: self._handlers.isPending)

    def cancelRequests(self, *args, **kw):
        return self._handlers.cancelRequests(self, *args, **kw)

    def getTimeout(self):
        # no timeout by default (e.g. listening connections never time out)
        pass

    def lockWrapper(self, func):
        # single-threaded by default; MTClientConnection overrides this
        return func

    def getConnector(self):
        return self.connector

    def getAddress(self):
        return self.addr

    def readable(self):
        raise NotImplementedError

    def writable(self):
        raise NotImplementedError

    def close(self):
        """Close the connection."""
        if self.connector is not None:
            self.em.unregister(self)
            self.connector.close()
            self.connector = None
            self.aborted = False

    def _getReprInfo(self):
        # Build (key, value) pairs and state flags for __repr__.
        r = [
            ('uuid', uuid_str(self.getUUID())),
            ('address', '%s:%u' % self.addr if self.addr else '?'),
            ('handler', self.getHandler()),
        ]
        connector = self.connector
        if connector is None:
            return r, ['closed']
        r.append(('fd', connector.getDescriptor()))
        return r, ['aborted'] if self.isAborted() else []

    def __repr__(self):
        r, flags = self._getReprInfo()
        r = map('%s=%s'.__mod__, r)
        r += flags
        return '<%s(%s) at %x>' % (
            self.__class__.__name__,
            ', '.join(r),
            id(self),
        )

    def setHandler(self, handler):
        if self._handlers.setHandler(handler):
            logging.debug('Set handler %r on %r', handler, self)
        else:
            logging.debug('Delay handler %r on %r', handler, self)

    def getUUID(self):
        return None

    def isClosed(self):
        return self.connector is None or self.isAborted()

    def isAborted(self):
        return False

    def isListening(self):
        return False

    def isServer(self):
        return False

    def isClient(self):
        return False

    def hasPendingMessages(self):
        return False

    def whoSetConnector(self):
        """
        Debugging method: call this method to know who set the current
        connector value.
        """
        return attributeTracker.whoSet(self, 'connector')

    def idle(self):
        # called when the keep-alive delay elapsed with nothing pending
        pass
# Record who assigns BaseConnection.connector, for whoSetConnector() debugging.
attributeTracker.track(BaseConnection)
class ListeningConnection(BaseConnection):
    """A listen connection."""

    def __init__(self, event_manager, handler, addr):
        logging.debug('listening to %s:%d', *addr)
        connector = self.ConnectorClass(addr)
        BaseConnection.__init__(self, event_manager, handler, connector, addr)
        connector.makeListeningConnection()
        event_manager.register(self)

    def readable(self):
        # readability on a listening socket means an incoming connection
        try:
            connector, addr = self.connector.accept()
            logging.debug('accepted a connection from %s:%d', *addr)
            handler = self.getHandler()
            new_conn = ServerConnection(self.em, handler, connector, addr)
            handler.connectionAccepted(new_conn)
        except ConnectorTryAgainException:
            # spurious wakeup: nothing to accept yet
            pass

    def getAddress(self):
        # ask the OS, as the bound port may have been chosen dynamically
        return self.connector.getAddress()

    def isListening(self):
        return True
class Connection(BaseConnection):
    """A connection."""
    # XXX: rename isPending, hasPendingMessages & pending methods

    # default state flags, overridden by subclasses or at runtime
    connecting = False
    client = False
    server = False
    peer_id = None
    _next_timeout = None
    _timeout = 0

    def __init__(self, event_manager, *args, **kw):
        BaseConnection.__init__(self, event_manager, *args, **kw)
        self.read_buf = ReadBuffer()
        self.write_buf = []
        self.cur_id = 0
        self.aborted = False
        self.uuid = None
        self._queue = []
        self._on_close = None
        self._parser_state = ParserState()

    def _getReprInfo(self):
        r, flags = super(Connection, self)._getReprInfo()
        if self._queue:
            r.append(('len(queue)', len(self._queue)))
        if self._on_close is not None:
            r.append(('on_close', getattr(self._on_close, '__name__', '?')))
        flags.extend(x for x in ('aborted', 'connecting', 'client', 'server')
                     if getattr(self, x))
        return r, flags

    def setOnClose(self, callback):
        # callback invoked exactly once, when the connection is closed/aborted
        self._on_close = callback

    def isClient(self):
        return self.client

    def isServer(self):
        return self.server

    def asClient(self):
        try:
            # cancel a deferred closeClient() if one is pending
            del self.idle
            assert self.client
        except AttributeError:
            self.client = True

    def asServer(self):
        self.server = True

    def _closeClient(self):
        # keep-alive expired while this link was only used as a client
        if self.server:
            del self.idle
            self.client = False
            self.notify(Packets.CloseClient())
        else:
            self.close()

    def closeClient(self):
        # defer the actual close until the connection becomes idle
        if self.connector is not None and self.client:
            self.idle = self._closeClient

    def isAborted(self):
        return self.aborted

    def getUUID(self):
        return self.uuid

    def setUUID(self, uuid):
        self.uuid = uuid

    def setPeerId(self, peer_id):
        assert peer_id is not None
        self.peer_id = peer_id

    def getPeerId(self):
        return self.peer_id

    def _getNextId(self):
        # 32-bit wrapping packet id counter
        next_id = self.cur_id
        self.cur_id = (next_id + 1) & 0xffffffff
        return next_id

    def updateTimeout(self, t=None):
        # `t` is the reference instant (when a packet was sent/received);
        # when None, keep the previous reference and only adjust the delay.
        if not self._queue:
            if not t:
                t = self._next_timeout - self._timeout
            self._timeout = self._handlers.getNextTimeout() or self.KEEP_ALIVE
            self._next_timeout = t + self._timeout

    def getTimeout(self):
        # no timeout while packets await processing: we must not time out
        # on an answer that is already in our incoming queue
        if not self._queue:
            return self._next_timeout

    def onTimeout(self):
        handlers = self._handlers
        if handlers.isPending():
            msg_id = handlers.timeout(self)
            if msg_id is None:
                # keep waiting; rearm the timer
                self._next_timeout = time() + self._timeout
            else:
                logging.info('timeout for #0x%08x with %r', msg_id, self)
                self.close()
        else:
            # nothing pending: keep-alive (or deferred closeClient)
            self.idle()

    def abort(self):
        """Abort dealing with this connection."""
        logging.debug('aborting a connector for %r', self)
        self.aborted = True
        assert self.write_buf
        if self._on_close is not None:
            self._on_close()
            self._on_close = None

    def writable(self):
        """Called when self is writable."""
        self._send()
        if not self.write_buf and self.connector is not None:
            if self.aborted:
                self.close()
            else:
                self.em.removeWriter(self)

    def readable(self):
        """Called when self is readable."""
        self._recv()
        self._analyse()
        if self.aborted:
            self.em.removeReader(self)
        return not not self._queue

    def _analyse(self):
        """Analyse received data."""
        try:
            while True:
                packet = Packets.parse(self.read_buf, self._parser_state)
                if packet is None:
                    break
                self._queue.append(packet)
        except PacketMalformedError, e:
            logging.error('malformed packet from %r: %s', self, e)
            self._closure()

    def hasPendingMessages(self):
        """
        Returns True if there are messages queued and awaiting processing.
        """
        return not not self._queue

    def process(self):
        """
        Process a pending packet.
        """
        # check out packet and process it with current handler
        self._handlers.handle(self, self._queue.pop(0))
        self.updateTimeout()

    def pending(self):
        return self.connector is not None and self.write_buf

    @property
    def setReconnectionNoDelay(self):
        # expose the connector method directly
        return self.connector.setReconnectionNoDelay

    def close(self):
        if self.connector is None:
            assert self._on_close is None
            assert not self.read_buf
            assert not self.write_buf
            assert not self.isPending()
            return
        # process the network events with the last registered handler to
        # solve issues where a node is lost with pending handlers and
        # create unexpected side effects.
        handler = self._handlers.getLastHandler()
        super(Connection, self).close()
        if self._on_close is not None:
            self._on_close()
            self._on_close = None
        del self.write_buf[:]
        self.read_buf.clear()
        try:
            if self.connecting:
                handler.connectionFailed(self)
                self.connecting = False
            else:
                handler.connectionClosed(self)
        finally:
            self._handlers.close()

    def _closure(self):
        # drain already-received packets before actually closing
        assert self.connector is not None, self.whoSetConnector()
        while self._queue:
            self._handlers.handle(self, self._queue.pop(0))
        self.close()

    def _recv(self):
        """Receive data from a connector."""
        try:
            data = self.connector.receive()
        except ConnectorTryAgainException:
            pass
        except ConnectorConnectionRefusedException:
            # refusal can only happen while connecting
            assert self.connecting
            self._closure()
        except ConnectorConnectionClosedException:
            # connection reset by peer; according to the man page this error
            # should not occur here, but it seems that is false
            logging.debug('Connection reset by peer: %r', self.connector)
            self._closure()
        except:
            logging.debug('Unknown connection error: %r', self.connector)
            self._closure()
            # unhandled connector exception
            raise
        else:
            if not data:
                # EOF: peer closed cleanly
                logging.debug('Connection %r closed in recv', self.connector)
                self._closure()
                return
            # last known remote activity
            self._next_timeout = time() + self._timeout
            self.read_buf.append(data)

    def _send(self):
        """Send data to a connector."""
        if not self.write_buf:
            return
        msg = ''.join(self.write_buf)
        try:
            n = self.connector.send(msg)
        except ConnectorTryAgainException:
            pass
        except ConnectorConnectionClosedException:
            # connection reset by peer
            logging.debug('Connection reset by peer: %r', self.connector)
            self._closure()
        except:
            logging.debug('Unknown connection error: %r', self.connector)
            # unhandled connector exception
            self._closure()
            raise
        else:
            if not n:
                logging.debug('Connection %r closed in send', self.connector)
                self._closure()
                return
            if n == len(msg):
                del self.write_buf[:]
            else:
                # partial write: keep the unsent tail
                self.write_buf = [msg[n:]]

    def _addPacket(self, packet):
        """Add a packet into the write buffer."""
        if self.connector is None:
            return
        was_empty = not self.write_buf
        self.write_buf.extend(packet.encode())
        if was_empty:
            # enable polling for writing.
            self.em.addWriter(self)
        logging.packet(self, packet, True)

    @not_closed
    def notify(self, packet):
        """ Send a packet with a new ID """
        msg_id = self._getNextId()
        packet.setId(msg_id)
        self._addPacket(packet)
        return msg_id

    @not_closed
    def ask(self, packet, timeout=CRITICAL_TIMEOUT, on_timeout=None, **kw):
        """
        Send a packet with a new ID and register the expectation of an answer
        """
        msg_id = self._getNextId()
        packet.setId(msg_id)
        self._addPacket(packet)
        handlers = self._handlers
        # when answers were already pending, keep the running reference time
        t = None if handlers.isPending() else time()
        handlers.emit(packet, timeout, on_timeout, kw)
        if not self._queue:
            next_timeout = self._next_timeout
            self.updateTimeout(t)
            if self._next_timeout < next_timeout:
                # the new deadline is earlier: wake the poll loop up
                self.em.wakeup()
        return msg_id

    @not_closed
    def answer(self, packet, msg_id=None):
        """ Answer to a packet by re-using its ID for the packet answer """
        if msg_id is None:
            msg_id = self.getPeerId()
        packet.setId(msg_id)
        assert packet.isResponse(), packet
        self._addPacket(packet)

    def idle(self):
        # keep-alive: ping the peer when the connection has been quiet
        self.ask(Packets.Ping())
class ClientConnection(Connection):
    """A connection from this node to a remote node."""
    connecting = True
    client = True

    def __init__(self, event_manager, handler, node):
        addr = node.getAddress()
        connector = self.ConnectorClass(addr)
        Connection.__init__(self, event_manager, handler, connector, addr)
        node.setConnection(self)
        handler.connectionStarted(self)
        self._connect()

    def _connect(self):
        # Start a non-blocking connect, covering every outcome of
        # makeClientConnection (in progress, rate-limited, refused, error,
        # or immediate success).
        try:
            self.connector.makeClientConnection()
        except ConnectorInProgressException:
            self.em.register(self)
            self.em.addWriter(self)
        except ConnectorDelayedConnection, c:
            # reconnection rate-limited: wait until connect_limit
            connect_limit, = c.args
            self.getTimeout = lambda: connect_limit
            self.onTimeout = self._delayedConnect
            self.em.register(self, timeout_only=True)
            # Fake a non-empty write buffer so that _addPacket does not
            # try to reenable polling for writing.
            self.write_buf.insert(0, '')
        except ConnectorConnectionRefusedException:
            self._closure()
        except ConnectorException:
            # unhandled connector exception
            self._closure()
            raise
        else:
            self.em.register(self)
            if self.write_buf:
                self.em.addWriter(self)
            self._connectionCompleted()

    def _delayedConnect(self):
        # rate-limit delay elapsed: restore normal behaviour and connect
        del self.getTimeout, self.onTimeout, self.write_buf[0]
        self._connect()

    def writable(self):
        """Called when self is writable."""
        if self.connector.getError():
            self._closure()
        else:
            self._connectionCompleted()
            # _connectionCompleted rebound self.writable to the parent
            # implementation, so this is not an infinite recursion
            self.writable()

    def _connectionCompleted(self):
        self.writable = self.lockWrapper(super(ClientConnection, self).writable)
        self.connecting = False
        self.updateTimeout(time())
        self.getHandler().connectionCompleted(self)
class ServerConnection(Connection):
    """A connection from a remote node to this node."""
    # Both server and client must check the connection, in case:
    # - the remote crashed brutally (i.e. without closing TCP connections)
    # - or packets sent by the remote are dropped (network failure)
    # Use different timeout so that in normal condition, server never has to
    # ping the client. Otherwise, it would do it about half of the time.
    KEEP_ALIVE = Connection.KEEP_ALIVE + 5

    server = True

    def __init__(self, *args, **kw):
        Connection.__init__(self, *args, **kw)
        self.em.register(self)
        self.updateTimeout(time())
class MTConnectionType(type):
    """Metaclass that wraps connection entry points with the instance lock.

    Methods listed below are replaced by wrappers that either assert the
    lock is held (debug builds) or acquire it around the call.
    """

    def __init__(cls, *args):
        if __debug__:
            # these must only be called with the lock already held
            for name in 'answer',:
                setattr(cls, name, cls.lockCheckWrapper(name))
        # these acquire the lock themselves
        for name in ('_delayedConnect', 'close', 'notify', 'onTimeout',
                     'process', 'readable', 'writable'):
            setattr(cls, name, cls.__class__.lockWrapper(cls, name))

    def lockCheckWrapper(cls, name):
        def wrapper(self, *args, **kw):
            # XXX: Unfortunately, RLock does not have any public method
            # to test whether we own the lock or not.
            assert self.lock._is_owned(), (self, args, kw)
            return getattr(super(cls, self), name)(*args, **kw)
        return wraps(getattr(cls, name).im_func)(wrapper)

    def lockWrapper(cls, name):
        def wrapper(self, *args, **kw):
            with self.lock:
                return getattr(super(cls, self), name)(*args, **kw)
        return wraps(getattr(cls, name).im_func)(wrapper)
class MTClientConnection(ClientConnection):
    """A Multithread-safe version of ClientConnection."""
    __metaclass__ = MTConnectionType

    def lockWrapper(self, func):
        # wrap *func* so it runs under this connection's RLock
        lock = self.lock
        def wrapper(*args, **kw):
            with lock:
                return func(*args, **kw)
        return wrapper

    def __init__(self, *args, **kwargs):
        self.lock = lock = RLock()
        self.dispatcher = kwargs.pop('dispatcher')
        with lock:
            super(MTClientConnection, self).__init__(*args, **kwargs)

    def ask(self, packet, timeout=CRITICAL_TIMEOUT, on_timeout=None,
            queue=None, **kw):
        with self.lock:
            if self.isClosed():
                raise ConnectionClosed
            # XXX: Here, we duplicate Connection.ask because we need to call
            # self.dispatcher.register after setId is called and before
            # _addPacket is called.
            msg_id = self._getNextId()
            packet.setId(msg_id)
            if queue is None:
                if type(packet) is not Packets.Ping:
                    raise TypeError, 'Only Ping packet can be asked ' \
                        'without a queue, got a %r.' % (packet, )
            else:
                self.dispatcher.register(self, msg_id, queue)
            self._addPacket(packet)
            handlers = self._handlers
            t = None if handlers.isPending() else time()
            handlers.emit(packet, timeout, on_timeout, kw)
            if not self._queue:
                next_timeout = self._next_timeout
                self.updateTimeout(t)
                if self._next_timeout < next_timeout:
                    self.em.wakeup()
            return msg_id
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/connector.py 0000664 0000000 0000000 00000015762 12601037530 0025002 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import socket
import errno
from time import time
# Global connector registry.
# Fill by calling registerConnectorHandler.
# Read by calling SocketConnector.__new__
connector_registry = {}

def registerConnectorHandler(connector_handler):
    """Index a SocketConnector subclass by its socket address family."""
    connector_registry[connector_handler.af_type] = connector_handler
class SocketConnector(object):
""" This class is a wrapper for a socket """
is_closed = is_server = None
connect_limit = {}
CONNECT_LIMIT = 1
def __new__(cls, addr, s=None):
if s is None:
host, port = addr
for af_type, cls in connector_registry.iteritems():
try :
socket.inet_pton(af_type, host)
break
except socket.error:
pass
else:
raise ValueError("Unknown type of host", host)
self = object.__new__(cls)
self.addr = cls._normAddress(addr)
if s is None:
s = socket.socket(af_type, socket.SOCK_STREAM)
else:
self.is_server = True
self.is_closed = False
self.socket = s
self.socket_fd = s.fileno()
# always use non-blocking sockets
s.setblocking(0)
# disable Nagle algorithm to reduce latency
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return self
# Threaded tests monkey-patch the following 2 operations.
_connect = lambda self, addr: self.socket.connect(addr)
_bind = lambda self, addr: self.socket.bind(addr)
def makeClientConnection(self):
assert self.is_closed is None
addr = self.addr
try:
connect_limit = self.connect_limit[addr]
if time() < connect_limit:
raise ConnectorDelayedConnection(connect_limit)
except KeyError:
pass
self.connect_limit[addr] = time() + self.CONNECT_LIMIT
self.is_server = self.is_closed = False
try:
self._connect(addr)
except socket.error, (err, errmsg):
if err == errno.EINPROGRESS:
raise ConnectorInProgressException
if err == errno.ECONNREFUSED:
raise ConnectorConnectionRefusedException
raise ConnectorException, 'makeClientConnection to %s failed:' \
' %s:%s' % (addr, err, errmsg)
def makeListeningConnection(self):
assert self.is_closed is None
self.is_closed = False
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._bind(self.addr)
self.socket.listen(5)
except socket.error, (err, errmsg):
self.socket.close()
raise ConnectorException, 'makeListeningConnection on %s failed:' \
' %s:%s' % (addr, err, errmsg)
def getError(self):
return self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
def getDescriptor(self):
# this descriptor must only be used by the event manager, where it
# guarantee unicity only while the connector is opened and registered
# in epoll
return self.socket_fd
@staticmethod
def _normAddress(addr):
return addr
def getAddress(self):
return self._normAddress(self.socket.getsockname())
def accept(self):
try:
s, addr = self.socket.accept()
s = self.__class__(addr, s)
return s, s.addr
except socket.error, (err, errmsg):
if err == errno.EAGAIN:
raise ConnectorTryAgainException
raise ConnectorException, 'accept failed: %s:%s' % \
(err, errmsg)
def receive(self):
try:
return self.socket.recv(4096)
except socket.error, (err, errmsg):
if err == errno.EAGAIN:
raise ConnectorTryAgainException
if err in (errno.ECONNREFUSED, errno.EHOSTUNREACH):
raise ConnectorConnectionRefusedException
if err in (errno.ECONNRESET, errno.ETIMEDOUT):
raise ConnectorConnectionClosedException
raise ConnectorException, 'receive failed: %s:%s' % (err, errmsg)
def send(self, msg):
try:
return self.socket.send(msg)
except socket.error, (err, errmsg):
if err == errno.EAGAIN:
raise ConnectorTryAgainException
if err in (errno.ECONNRESET, errno.ETIMEDOUT, errno.EPIPE):
raise ConnectorConnectionClosedException
raise ConnectorException, 'send failed: %s:%s' % (err, errmsg)
def close(self):
self.is_closed = True
try:
if self.connect_limit[self.addr] < time():
del self.connect_limit[self.addr]
except KeyError:
pass
return self.socket.close()
def setReconnectionNoDelay(self):
"""Mark as successful so that we can reconnect without delay"""
self.connect_limit.pop(self.addr, None)
def __repr__(self):
if self.is_closed is None:
state = 'never opened'
else:
if self.is_closed:
state = 'closed '
else:
state = 'opened '
if self.is_server is None:
state += 'listening'
else:
if self.is_server:
state += 'from '
else:
state += 'to '
state += str(self.addr)
return '<%s at 0x%x fileno %s %s, %s>' % (self.__class__.__name__,
id(self), '?' if self.is_closed else self.socket_fd,
self.getAddress(), state)
class SocketConnectorIPv4(SocketConnector):
    """Wrapper for IPv4 sockets."""
    af_type = socket.AF_INET
class SocketConnectorIPv6(SocketConnector):
    """Wrapper for IPv6 sockets."""
    af_type = socket.AF_INET6

    @staticmethod
    def _normAddress(addr):
        # getsockname() returns (host, port, flowinfo, scope_id);
        # only keep (host, port)
        return addr[:2]
# Populate the registry with the concrete address families we support.
registerConnectorHandler(SocketConnectorIPv4)
registerConnectorHandler(SocketConnectorIPv6)
class ConnectorException(Exception):
    """Base class for all connector-level errors."""

class ConnectorTryAgainException(ConnectorException):
    """The operation would block (EAGAIN); retry later."""

class ConnectorInProgressException(ConnectorException):
    """A non-blocking connect is still in progress (EINPROGRESS)."""

class ConnectorConnectionClosedException(ConnectorException):
    """The peer closed or reset the connection."""

class ConnectorConnectionRefusedException(ConnectorException):
    """The peer refused the connection."""

class ConnectorDelayedConnection(ConnectorException):
    """Reconnection to this address is rate-limited; args = (connect_limit,)."""
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/debug.py 0000664 0000000 0000000 00000005611 12601037530 0024066 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2010-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import traceback
import signal
import imp
import os
import sys
from functools import wraps
import neo
# kill -RTMIN+2
# Dump information to logs.
# kill -RTMIN+3
# Loads (or reloads) neo.debug module.
# The content is up to you (it's only imported). It can be a breakpoint.
def safe_handler(func):
    """Decorator for signal handlers: print any exception instead of
    letting it escape the handler, so mistakes in the "debug" module
    cannot kill the process."""
    @wraps(func)
    def guarded(sig, frame):
        try:
            func(sig, frame)
        except:
            # deliberately swallow everything: a handler must never raise
            traceback.print_exc()
    return guarded
@safe_handler
def debugHandler(sig, frame):
    """SIGRTMIN+3 handler: load (or reload) the neo.debug module.

    The module content is up to the user (it is only imported); it can
    set a breakpoint, dump state, etc.
    """
    file, filename, (suffix, mode, type) = imp.find_module('debug',
        neo.__path__)
    try:
        imp.load_module('neo.debug', file, filename, (suffix, mode, type))
    finally:
        # BUG FIX: imp.find_module opens the file and the caller is
        # responsible for closing it; it was previously leaked.
        if file is not None:
            file.close()
def getPdb(**kw):
    """Return an IPython-flavoured Pdb when IPython is usable,
    falling back to a plain pdb.Pdb otherwise."""
    try:
        import IPython
        shell = IPython.terminal.embed.InteractiveShellEmbed()
        return IPython.core.debugger.Pdb(shell.colors, **kw)
    except (ImportError, AttributeError):
        # IPython missing or too old: use the standard debugger
        import pdb
        return pdb.Pdb(**kw)
# NOTE(review): module-level placeholder; not referenced in this chunk.
_debugger = None
def winpdb(depth=0):
    """Attach a winpdb/rpdb2 graphical debugging session to this process.

    If a debugger is already running, just set a breakpoint at the
    caller's depth.  Otherwise fork: the parent blocks in the embedded
    debugger, while the child exec's a winpdb UI pre-configured with a
    session password derived from the pid and the working directory.
    """
    import rpdb2
    depth += 1
    if rpdb2.g_debugger is not None:
        return rpdb2.setbreak(depth)
    script = rpdb2.calc_frame_path(sys._getframe(depth))
    # unique, shell-safe session password
    pwd = str(os.getpid()) + os.getcwd().replace('/', '_').replace('-', '_')
    pid = os.fork()
    if pid:
        # parent: wait under the embedded debugger until the UI exits
        try:
            rpdb2.start_embedded_debugger(pwd, depth=depth)
        finally:
            os.waitpid(pid, 0)
    else:
        # child: double-fork into the winpdb UI, feeding it the password
        # automatically by monkey-patching rpdb2's input prompt
        try:
            os.execlp('python', 'python', '-c', """import os\nif not os.fork():
    import rpdb2, winpdb
    rpdb2_raw_input = rpdb2._raw_input
    rpdb2._raw_input = lambda s: \
        s == rpdb2.STR_PASSWORD_INPUT and %r or rpdb2_raw_input(s)
    winpdb.g_ignored_warnings[winpdb.STR_EMBEDDED_WARNING] = True
    winpdb.main()
""" % pwd, '-a', script)
        finally:
            os.abort()
def register(on_log=None):
    """Install the debugging signal handlers.

    SIGRTMIN+2 calls *on_log* (when given) to dump information to logs;
    SIGRTMIN+3 (re)loads the neo.debug module.
    """
    if on_log is not None:
        @safe_handler
        def on_log_signal(signum, signal):
            on_log()
        signal.signal(signal.SIGRTMIN+2, on_log_signal)
    signal.signal(signal.SIGRTMIN+3, debugHandler)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/dispatcher.py 0000664 0000000 0000000 00000012465 12601037530 0025133 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from functools import wraps
from .locking import Lock, Empty
# Shared sentinels: EMPTY is a read-only default for dict lookups;
# NOBODY marks a message whose answer is expected by no one (ignore it).
EMPTY = {}
NOBODY = []
class ForgottenPacket(object):
    """
    Queue marker pushed when an expected answer has been forgotten.
    Similar in purpose to pushing "None" when a connection is closed,
    but with a different meaning: the answer must simply be ignored.
    """
    def __init__(self, msg_id):
        self.msg_id = msg_id

    def getId(self):
        """Return the id of the forgotten message."""
        return self.msg_id
def giant_lock(func):
    """Decorator serializing *func* under the instance's dispatcher lock."""
    @wraps(func)
    def locked(self, *args, **kw):
        self.lock_acquire()
        try:
            return func(self, *args, **kw)
        finally:
            self.lock_release()
    return locked
class Dispatcher:
"""Register a packet, connection pair as expecting a response packet."""
def __init__(self):
self.message_table = {}
self.queue_dict = {}
lock = Lock()
self.lock_acquire = lock.acquire
self.lock_release = lock.release
@giant_lock
def dispatch(self, conn, msg_id, packet, kw):
"""
Retrieve register-time provided queue, and put conn and packet in it.
"""
queue = self.message_table.get(id(conn), EMPTY).pop(msg_id, None)
if queue is None:
return False
elif queue is NOBODY:
return True
self._decrefQueue(queue)
queue.put((conn, packet, kw))
return True
def _decrefQueue(self, queue):
queue_id = id(queue)
queue_dict = self.queue_dict
if queue_dict[queue_id] == 1:
queue_dict.pop(queue_id)
else:
queue_dict[queue_id] -= 1
def _increfQueue(self, queue):
queue_id = id(queue)
queue_dict = self.queue_dict
try:
queue_dict[queue_id] += 1
except KeyError:
queue_dict[queue_id] = 1
@giant_lock
def register(self, conn, msg_id, queue):
"""Register an expectation for a reply."""
self.message_table.setdefault(id(conn), {})[msg_id] = queue
self._increfQueue(queue)
def unregister(self, conn):
""" Unregister a connection and put fake packet in queues to unlock
threads excepting responses from that connection """
self.lock_acquire()
try:
message_table = self.message_table.pop(id(conn), EMPTY)
finally:
self.lock_release()
notified_set = set()
_decrefQueue = self._decrefQueue
for queue in message_table.itervalues():
if queue is NOBODY:
continue
queue_id = id(queue)
if queue_id not in notified_set:
queue.put((conn, None, None))
notified_set.add(queue_id)
_decrefQueue(queue)
@giant_lock
def forget(self, conn, msg_id):
""" Forget about a specific message for a specific connection.
Actually makes it "expected by nobody", so we know we can ignore it,
and not detect it as an error. """
message_table = self.message_table[id(conn)]
queue = message_table[msg_id]
if queue is NOBODY:
raise KeyError, 'Already expected by NOBODY: %r, %r' % (
conn, msg_id)
queue.put((conn, ForgottenPacket(msg_id), None))
self.queue_dict[id(queue)] -= 1
message_table[msg_id] = NOBODY
return queue
@giant_lock
def forget_queue(self, queue, flush_queue=True):
    """
    Forget all pending messages for given queue.
    Actually makes them "expected by nobody", so we know we can ignore
    them, and not detect it as an error.
    flush_queue (boolean, default=True)
        All packets in queue get flushed.

    Raises ValueError if the stored refcount does not match the number
    of expectations found (internal consistency check).
    """
    # XXX: expensive lookup: we iterate over the whole dict
    found = 0
    for message_table in self.message_table.itervalues():
        for msg_id, t_queue in message_table.iteritems():
            if queue is t_queue:
                found += 1
                # Rebinding an existing key is safe while iterating.
                message_table[msg_id] = NOBODY
    refcount = self.queue_dict.pop(id(queue), 0)
    if refcount != found:
        raise ValueError('We hit a refcount bug: %s queue uses ' \
            'expected, %s found' % (refcount, found))
    if flush_queue:
        # Drain anything already delivered to the queue.
        get = queue.get
        while True:
            try:
                get(block=False)
            except Empty:
                break
def registered(self, conn):
    """Tell whether some message is still expected on this connection."""
    return bool(self.message_table.get(id(conn), EMPTY))
@giant_lock
def pending(self, queue):
    # True while the queue still holds unread packets, or is still
    # registered to receive some (non-zero refcount).
    return not queue.empty() or self.queue_dict.get(id(queue), 0) > 0
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/event.py 0000664 0000000 0000000 00000022057 12601037530 0024124 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import os, thread
from time import time
from select import epoll, EPOLLIN, EPOLLOUT, EPOLLERR, EPOLLHUP
from errno import EAGAIN, EEXIST, EINTR, ENOENT
from . import logging
from .locking import Lock
class EpollEventManager(object):
    """This class manages connections and events based on epoll(5).

    Connections are indexed by their connector's file descriptor.
    poll() is the main entry point: it waits for events, queues readable
    connections into _pending_processing, and processes one of them.
    """

    # Set by wakeup(exit=True); checked in _poll() to exit the polling thread.
    _trigger_exit = False

    def __init__(self):
        # fd -> Connection for every registered connection
        self.connection_dict = {}
        # Initialize a dummy 'unregistered' for the very rare case a registered
        # connection is closed before the first call to poll. We don't care
        # leaking a few integers for connections closed between 2 polls.
        self.unregistered = []
        # fds currently watched for EPOLLIN / EPOLLOUT respectively
        self.reader_set = set()
        self.writer_set = set()
        self.epoll = epoll()
        self._pending_processing = []
        # Self-pipe trick: the write end is closed immediately, so epoll
        # reports EPOLLHUP on the read end as soon as it is registered,
        # which is how wakeup() interrupts a blocking epoll_wait.
        self._trigger_fd, w = os.pipe()
        os.close(w)
        self._trigger_lock = Lock()

    def close(self):
        os.close(self._trigger_fd)
        for c in self.connection_dict.values():
            c.close()
        # Drop every attribute at once so any further use fails loudly.
        del self.__dict__

    def getConnectionList(self):
        # XXX: use index
        return [x for x in self.connection_dict.itervalues()
                  if not x.isAborted()]

    def getClientList(self):
        # XXX: use index
        return [c for c in self.getConnectionList() if c.isClient()]

    def getServerList(self):
        # XXX: use index
        return [c for c in self.getConnectionList() if c.isServer()]

    def getConnectionListByUUID(self, uuid):
        """ Return the connection associated to the UUID, None if the UUID is
            None, invalid or not found"""
        # XXX: use index
        # XXX: consider remove UUID from connection and thus this method
        if uuid is None:
            return None
        result = []
        append = result.append
        for conn in self.getConnectionList():
            if conn.getUUID() == uuid:
                append(conn)
        return result

    # epoll_wait always waits for EPOLLERR & EPOLLHUP so we're forced
    # to unregister when we want to ignore all events for a connection.
    def register(self, conn, timeout_only=False):
        fd = conn.getConnector().getDescriptor()
        self.connection_dict[fd] = conn
        if timeout_only:
            # Don't watch I/O events: just wake the poll loop up so the
            # connection's timeout is taken into account by _poll().
            self.wakeup()
        else:
            self.epoll.register(fd)
            self.addReader(conn)

    def unregister(self, conn):
        new_pending_processing = [x for x in self._pending_processing
                                    if x is not conn]
        # Check that we removed at most one entry from
        # self._pending_processing .
        assert len(new_pending_processing) > len(self._pending_processing) - 2
        self._pending_processing = new_pending_processing
        fd = conn.getConnector().getDescriptor()
        try:
            del self.connection_dict[fd]
            self.unregistered.append(fd)
            self.epoll.unregister(fd)
        except KeyError:
            pass
        except IOError, e:
            if e.errno != ENOENT:
                raise
        else:
            self.reader_set.discard(fd)
            self.writer_set.discard(fd)

    def isIdle(self):
        # Idle when nothing is queued for processing and nothing waits
        # to be written out.
        return not (self._pending_processing or self.writer_set)

    def _addPendingConnection(self, conn):
        pending_processing = self._pending_processing
        if conn not in pending_processing:
            pending_processing.append(conn)

    def poll(self, blocking=1):
        """Wait for events if nothing is pending, then process one
        connection's received data."""
        if not self._pending_processing:
            # Fetch messages from polled file descriptors
            self._poll(blocking)
            if not self._pending_processing:
                return
        to_process = self._pending_processing.pop(0)
        try:
            to_process.process()
        finally:
            # ...and requeue if there are pending messages
            if to_process.hasPendingMessages():
                self._addPendingConnection(to_process)
        # Non-blocking call: as we handled a packet, we should just offer
        # poll a chance to fetch & send already-available data, but it must
        # not delay us.
        self._poll(0)

    def _poll(self, blocking):
        if blocking:
            # Derive the epoll timeout from the nearest connection timeout.
            timeout = None
            for conn in self.connection_dict.itervalues():
                t = conn.getTimeout()
                if t and (timeout is None or t < timeout):
                    timeout = t
                    timeout_conn = conn
            # Make sure epoll_wait does not return too early, because it has a
            # granularity of 1ms and Python 2.7 rounds the timeout towards zero.
            # See also https://bugs.python.org/issue20452 (fixed in Python 3).
            blocking = .001 + max(0, timeout - time()) if timeout else -1
        try:
            event_list = self.epoll.poll(blocking)
        except IOError, exc:
            if exc.errno in (0, EAGAIN):
                logging.info('epoll.poll triggered undocumented error %r',
                    exc.errno)
            elif exc.errno != EINTR:
                raise
            return
        if event_list:
            # Reset 'unregistered' so that handlers below can detect
            # connections that get unregistered while processing events.
            self.unregistered = unregistered = []
            wlist = []
            elist = []
            for fd, event in event_list:
                if event & EPOLLIN:
                    conn = self.connection_dict[fd]
                    if conn.readable():
                        self._addPendingConnection(conn)
                if event & EPOLLOUT:
                    wlist.append(fd)
                if event & (EPOLLERR | EPOLLHUP):
                    elist.append(fd)
            for fd in wlist:
                if fd not in unregistered:
                    self.connection_dict[fd].writable()
            for fd in elist:
                if fd in unregistered:
                    continue
                try:
                    conn = self.connection_dict[fd]
                except KeyError:
                    # Not a connection: this is the self-pipe trigger
                    # (EPOLLHUP) armed by wakeup().
                    assert fd == self._trigger_fd, fd
                    with self._trigger_lock:
                        self.epoll.unregister(fd)
                        if self._trigger_exit:
                            del self._trigger_exit
                            thread.exit()
                    continue
                if conn.readable():
                    self._addPendingConnection(conn)
        elif blocking > 0:
            # epoll timed out: trigger the connection whose timeout expired.
            logging.debug('timeout triggered for %r', timeout_conn)
            timeout_conn.onTimeout()

    def wakeup(self, exit=False):
        """Interrupt a blocking _poll(), optionally making the polling
        thread exit."""
        with self._trigger_lock:
            self._trigger_exit |= exit
            try:
                self.epoll.register(self._trigger_fd)
            except IOError, e:
                # Ignore if 'wakeup' is called several times in a row.
                if e.errno != EEXIST:
                    raise

    def addReader(self, conn):
        connector = conn.getConnector()
        assert connector is not None, conn.whoSetConnector()
        fd = connector.getDescriptor()
        if fd not in self.reader_set:
            self.reader_set.add(fd)
            self.epoll.modify(fd, EPOLLIN | (
                fd in self.writer_set and EPOLLOUT))

    def removeReader(self, conn):
        connector = conn.getConnector()
        assert connector is not None, conn.whoSetConnector()
        fd = connector.getDescriptor()
        if fd in self.reader_set:
            self.reader_set.remove(fd)
            self.epoll.modify(fd, fd in self.writer_set and EPOLLOUT)

    def addWriter(self, conn):
        connector = conn.getConnector()
        assert connector is not None, conn.whoSetConnector()
        fd = connector.getDescriptor()
        if fd not in self.writer_set:
            self.writer_set.add(fd)
            self.epoll.modify(fd, EPOLLOUT | (
                fd in self.reader_set and EPOLLIN))

    def removeWriter(self, conn):
        connector = conn.getConnector()
        assert connector is not None, conn.whoSetConnector()
        fd = connector.getDescriptor()
        if fd in self.writer_set:
            self.writer_set.remove(fd)
            self.epoll.modify(fd, fd in self.reader_set and EPOLLIN)

    def log(self):
        # Dump the current state for debugging.
        logging.info('Event Manager:')
        logging.info(' Readers: %r', list(self.reader_set))
        logging.info(' Writers: %r', list(self.writer_set))
        logging.info(' Connections:')
        pending_set = set(self._pending_processing)
        for fd, conn in self.connection_dict.items():
            logging.info(' %r: %r (pending=%r)', fd, conn,
                conn in pending_set)
# Default to EpollEventManager (the only implementation in this module).
EventManager = EpollEventManager
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/exception.py 0000664 0000000 0000000 00000001665 12601037530 0025003 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
class NeoException(Exception):
    """Base class of all NEO-specific exceptions."""
    pass
class ElectionFailure(NeoException):
    # Raised on failure of the primary master election
    # (semantics defined by the raising sites elsewhere in the project).
    pass
class PrimaryFailure(NeoException):
    # Raised when the primary master becomes unavailable
    # (semantics defined by the raising sites elsewhere in the project).
    pass
class OperationFailure(NeoException):
    # Raised when the cluster cannot serve operations
    # (semantics defined by the raising sites elsewhere in the project).
    pass
class DatabaseFailure(NeoException):
    # Raised on storage backend (database) errors
    # (semantics defined by the raising sites elsewhere in the project).
    pass
class NodeNotReady(NeoException):
    # Raised when a peer node is not yet ready to serve requests
    # (semantics defined by the raising sites elsewhere in the project).
    pass
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/handler.py 0000664 0000000 0000000 00000021757 12601037530 0024426 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from . import logging
from .protocol import (
NodeStates, Packets, Errors, BackendNotImplemented,
BrokenNodeDisallowedError, NotReadyError, PacketMalformedError,
ProtocolError, UnexpectedPacketError)
from .util import cached_property
class EventHandler(object):
    """This class handles events.

    Base class of all packet handlers: dispatch() routes each received
    packet to the method named by the packet type, translating protocol
    exceptions into error answers sent back to the peer.
    """

    def __new__(cls, app, *args, **kw):
        # One handler instance per (application, class) pair: stateless
        # handlers (those that do not override __init__) are cached in
        # app._handlers and reused.
        try:
            return app._handlers[cls]
        except AttributeError: # for BackupApplication
            self = object.__new__(cls)
        except KeyError:
            self = object.__new__(cls)
            if cls.__init__ is object.__init__:
                app._handlers[cls] = self
        self.app = app
        return self

    def __repr__(self):
        return self.__class__.__name__

    def __unexpectedPacket(self, conn, packet, message=None):
        """Handle an unexpected packet: answer a ProtocolError and abort."""
        if message is None:
            message = 'unexpected packet type %s in %s' % (type(packet),
                    self.__class__.__name__)
        else:
            message = 'unexpected packet: %s in %s' % (message,
                    self.__class__.__name__)
        logging.error(message)
        conn.answer(Errors.ProtocolError(message))
        conn.abort()
        # self.peerBroken(conn)

    def dispatch(self, conn, packet, kw={}):
        """This is a helper method to handle various packet types.

        Looks up the handler method from the packet type, decodes the
        packet and calls it; protocol-level exceptions raised by the
        handler are converted into error answers to the peer.
        """
        try:
            conn.setPeerId(packet.getId())
            try:
                method = getattr(self, packet.handler_method_name)
            except AttributeError:
                raise UnexpectedPacketError('no handler found')
            args = packet.decode() or ()
            method(conn, *args, **kw)
        except UnexpectedPacketError, e:
            if not conn.isClosed():
                self.__unexpectedPacket(conn, packet, *e.args)
        except PacketMalformedError, e:
            logging.error('malformed packet from %r: %s', conn, e)
            conn.close()
            # self.peerBroken(conn)
        except BrokenNodeDisallowedError:
            if not conn.isClosed():
                conn.answer(Errors.BrokenNode('go away'))
                conn.abort()
        except NotReadyError, message:
            if not conn.isClosed():
                if not message.args:
                    message = 'Retry Later'
                message = str(message)
                conn.answer(Errors.NotReady(message))
                conn.abort()
        except ProtocolError, message:
            if not conn.isClosed():
                message = str(message)
                conn.answer(Errors.ProtocolError(message))
                conn.abort()
        except BackendNotImplemented, message:
            m = message[0]
            conn.answer(Errors.BackendNotImplemented(
                "%s.%s does not implement %s"
                % (m.im_class.__module__, m.im_class.__name__, m.__name__)))
        except AssertionError:
            # Assertion failures are bugs: close the connection and let
            # the error propagate.
            conn.close()
            raise

    def checkClusterName(self, name):
        # raise an exception if the given name mismatch the current cluster name
        if self.app.name != name:
            logging.error('reject an alien cluster')
            raise ProtocolError('invalid cluster name')

    # Network level handlers

    def packetReceived(self, *args):
        """Called when a packet is received."""
        self.dispatch(*args)

    def connectionStarted(self, conn):
        """Called when a connection is started."""
        logging.debug('connection started for %r', conn)

    def connectionCompleted(self, conn):
        """Called when a connection is completed."""
        logging.debug('connection completed for %r (from %s:%u)',
                      conn, *conn.getConnector().getAddress())

    def connectionFailed(self, conn):
        """Called when a connection failed."""
        logging.debug('connection failed for %r', conn)

    def connectionAccepted(self, conn):
        """Called when a connection is accepted."""

    def connectionClosed(self, conn):
        """Called when a connection is closed by the peer."""
        logging.debug('connection closed for %r', conn)
        self.connectionLost(conn, NodeStates.TEMPORARILY_DOWN)

    #def peerBroken(self, conn):
    #    """Called when a peer is broken."""
    #    logging.error('%r is broken', conn)
    #    # NodeStates.BROKEN

    def connectionLost(self, conn, new_state):
        """ this is a method to override in sub-handlers when there is no need
            to make distinction from the kind event that closed the connection """
        pass

    # Packet handlers.

    def acceptIdentification(self, conn, node_type, *args):
        # Subclasses provide _acceptIdentification; without it, the
        # packet is unexpected in this handler.
        try:
            acceptIdentification = self._acceptIdentification
        except AttributeError:
            raise UnexpectedPacketError('no handler found')
        if conn.isClosed():
            # acceptIdentification received on a closed (probably aborted,
            # actually) connection. Reject any further packet as unexpected.
            conn.setHandler(EventHandler(self.app))
            return
        node = self.app.nm.getByAddress(conn.getAddress())
        assert node.getConnection() is conn, (node.getConnection(), conn)
        if node.getType() == node_type:
            node.setIdentified()
            acceptIdentification(node, *args)
            return
        # Node type mismatch: refuse the identification.
        conn.close()

    def ping(self, conn):
        conn.answer(Packets.Pong())

    def pong(self, conn):
        # Ignore PONG packets. The only purpose of ping/pong packets is
        # to test/maintain underlying connection.
        pass

    def notify(self, conn, message):
        logging.warning('notification from %r: %s', conn, message)

    def closeClient(self, conn):
        # The peer will no longer use the client part of this connection;
        # close it completely unless our client part is still in use.
        conn.server = False
        if not conn.client:
            conn.close()

    # Error packet handlers.

    def error(self, conn, code, message, **kw):
        # Dispatch to the method named after the error code.
        try:
            getattr(self, Errors[code])(conn, message)
        except (AttributeError, ValueError):
            raise UnexpectedPacketError(message)

    # XXX: For some errors, the connection should have been closed by the remote
    #      peer. But what happens if it's not the case because of some bug ?

    def protocolError(self, conn, message):
        logging.error('protocol error: %s', message)

    def notReadyError(self, conn, message):
        logging.error('not ready: %s', message)

    def timeoutError(self, conn, message):
        logging.error('timeout error: %s', message)

    def brokenNodeDisallowedError(self, conn, message):
        raise RuntimeError, 'broken node disallowed error: %s' % (message,)

    def alreadyPendingError(self, conn, message):
        logging.error('already pending error: %s', message)

    def ack(self, conn, message):
        logging.debug("no error message: %s", message)

    def backendNotImplemented(self, conn, message):
        raise NotImplementedError(message)
class MTEventHandler(EventHandler):
    """Base class of handler implementations for MTClientConnection"""

    @cached_property
    def dispatcher(self):
        # Shortcut to the application's dispatcher, resolved once.
        return self.app.dispatcher

    def dispatch(self, conn, packet, kw={}):
        # The connection lock must be held by the calling thread.
        assert conn.lock._is_owned() # XXX: see also lockCheckWrapper
        super(MTEventHandler, self).dispatch(conn, packet, kw)

    def packetReceived(self, conn, packet, kw={}):
        """Redirect all received packet to dispatcher thread."""
        if packet.isResponse() and type(packet) is not Packets.Pong:
            # Answers go to the queue registered for their msg_id; an
            # unregistered answer is a protocol violation.
            if not self.dispatcher.dispatch(conn, packet.getId(), packet, kw):
                raise ProtocolError('Unexpected response packet from %r: %r'
                                    % (conn, packet))
        else:
            self.dispatch(conn, packet, kw)

    def connectionLost(self, conn, new_state):
        # Wake up threads waiting for answers on this connection.
        self.dispatcher.unregister(conn)

    def connectionFailed(self, conn):
        # Wake up threads waiting for answers on this connection.
        self.dispatcher.unregister(conn)
def unexpectedInAnswerHandler(*args, **kw):
    """Shared placeholder callback: any event reaching it is a bug."""
    error = Exception('Unexpected event in an answer handler')
    raise error
class AnswerBaseHandler(EventHandler):
    """Base class of handlers that process only answer packets.

    Every connection-level event is bound to unexpectedInAnswerHandler,
    which raises: no such event should ever reach an answer handler.
    """
    connectionStarted = unexpectedInAnswerHandler
    connectionCompleted = unexpectedInAnswerHandler
    connectionFailed = unexpectedInAnswerHandler
    connectionAccepted = unexpectedInAnswerHandler
    timeoutExpired = unexpectedInAnswerHandler
    connectionClosed = unexpectedInAnswerHandler
    packetReceived = unexpectedInAnswerHandler
    peerBroken = unexpectedInAnswerHandler
    protocolError = unexpectedInAnswerHandler
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/locking.py 0000664 0000000 0000000 00000015117 12601037530 0024430 0 ustar 00root root 0000000 0000000 import os
import sys
import threading
import traceback
from collections import deque
from time import time
from Queue import Empty
"""
Verbose locking classes.
Python threading module contains a simple logging mechanism, but:
- It's limitted to RLock class
- It's enabled instance by instance
- Choice to log or not is done at instanciation
- It does not emit any log before trying to acquire lock
This file defines a VerboseLock class implementing basic lock API and
logging in appropriate places with extensive details.
It can be globaly toggled by changing VERBOSE_LOCKING value.
There is no overhead at all when disabled (passthrough to threading
classes).
"""
VERBOSE_LOCKING = False
class LockUser(object):
    """Identity of one lock operation: thread name, time and call stack.

    Used by the verbose lock wrappers below to log who acquires and
    releases a lock, and from where.
    """

    def __init__(self, message, level=0):
        t = threading.currentThread()
        ident = getattr(t, 'node_name', t.name)
        # This class is instantiated from a place desiring to know what
        # called it.
        # limit=1 would return execution position in this method
        # limit=2 would return execution position in caller
        # limit=3 returns execution position in caller's caller
        # Additional level value (should be positive only) can be used when
        # more intermediate calls are involved
        self.stack = stack = traceback.extract_stack()[:-2-level]
        path, line_number, func_name, line = stack[-1]
        # Simplify path. Only keep 3 last path elements. It is enough for
        # current Neo directory structure.
        path = os.path.join('...', *path.split(os.path.sep)[-3:])
        self.time = time()
        # Use a detailed identifier for the creation note only, then keep
        # the short thread identifier for later comparisons and notes.
        self.ident = "%s@%r %s:%s %s" % (
            ident, self.time, path, line_number, line)
        self.note(message)
        self.ident = ident

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.ident == other.ident

    def __repr__(self):
        return "%s@%r" % (self.ident, self.time)

    def formatStack(self):
        return ''.join(traceback.format_list(self.stack))

    # Bind sys.stderr's write/flush once, at class-creation time, so notes
    # keep going to the original stderr even if sys.stderr is rebound.
    def note():
        write = sys.stderr.write
        flush = sys.stderr.flush
        def note(self, message):
            write("[%s] %s\n" % (self.ident, message))
            flush()
        return note
    note = note()
class VerboseLockBase(object):
    """Common machinery of the verbose lock wrappers: every acquire and
    release is logged with the calling thread and call site.

    Subclasses set self.lock to the wrapped threading primitive and
    implement _locked().
    """

    # Exception raised by the wrapped primitive on bad release, and the
    # message identifying the "unlocked release" case.
    _error_class = threading.ThreadError
    _release_error = 'release unlocked lock'

    def __init__(self, check_owner, name=None, verbose=None):
        # check_owner: warn when a thread re-acquires a lock it already
        # owns (a deadlock for non-reentrant locks).
        self._check_owner = check_owner
        self._name = name or '<%s@%X>' % (self.__class__.__name__, id(self))
        self.owner = None
        self.waiting = []
        LockUser(repr(self) + " created", 1)

    def acquire(self, blocking=1):
        owner = self.owner if self._locked() else None
        me = LockUser("%s.acquire(%s). Owned by %r. Waiting: %r"
                      % (self, blocking, owner, self.waiting))
        if blocking:
            if self._check_owner and me == owner:
                # Probable self-deadlock: log both tracebacks.
                me.note("I already own this lock: %r" % owner)
                me.note("Owner traceback:\n%s" % owner.formatStack())
                me.note("My traceback:\n%s" % me.formatStack())
            self.waiting.append(me)
        try:
            locked = self.lock.acquire(blocking)
        finally:
            if blocking:
                self.waiting.remove(me)
        if locked:
            self.owner = me
            me.note("Lock granted. Waiting: " + repr(self.waiting))
        return locked
    __enter__ = acquire

    def release(self):
        me = LockUser("%s.release(). Waiting: %r" % (self, self.waiting))
        try:
            return self.lock.release()
        except self._error_class:
            t, v, tb = sys.exc_info()
            if str(v) == self._release_error:
                # Re-raise with the lock and the releaser appended to the
                # message, keeping the original traceback (py2 syntax).
                raise t, "%s %s (%s)" % (v, self, me), tb
            raise

    def __exit__(self, t, v, tb):
        self.release()

    def _locked(self):
        # Subclasses tell whether the wrapped lock is currently held.
        raise NotImplementedError

    def __repr__(self):
        return self._name
class VerboseRLock(VerboseLockBase):
    """Verbose wrapper around threading.RLock (reentrant)."""
    _error_class = RuntimeError
    _release_error = 'cannot release un-acquired lock'

    def __init__(self, **kw):
        # Re-acquiring an RLock is legal, so owner checking is disabled.
        super(VerboseRLock, self).__init__(check_owner=False, **kw)
        self.lock = threading.RLock()

    def _locked(self):
        # Peeks at RLock internals (CPython implementation detail).
        return self.lock._RLock__block.locked()

    def _is_owned(self):
        return self.lock._is_owned()
class VerboseLock(VerboseLockBase):
    """Verbose wrapper around threading.Lock (non-reentrant)."""
    def __init__(self, check_owner=True, **kw):
        super(VerboseLock, self).__init__(check_owner, **kw)
        self.lock = threading.Lock()

    def locked(self):
        return self.lock.locked()
    _locked = locked
class VerboseSemaphore(VerboseLockBase):
    """Verbose wrapper around threading.Semaphore."""
    def __init__(self, value=1, check_owner=True, **kw):
        super(VerboseSemaphore, self).__init__(check_owner, **kw)
        self.lock = threading.Semaphore(value)

    def _locked(self):
        # "Held" when the internal counter is exhausted
        # (CPython implementation detail).
        return not self.lock._Semaphore__value
# Export the verbose wrappers when debugging is enabled, otherwise the
# plain threading primitives (zero overhead).
if VERBOSE_LOCKING:
    Lock, RLock, Semaphore = VerboseLock, VerboseRLock, VerboseSemaphore
else:
    Lock, RLock, Semaphore = \
        threading.Lock, threading.RLock, threading.Semaphore
class SimpleQueue(object):
    """
    Similar to Queue.Queue but with simpler locking scheme, reducing lock
    contention on "put" (benchmark shows 60% less time spent in "put").
    As a result:
    - only a single consumer possible ("get" vs. "get" race condition)
    - only a single producer possible ("put" vs. "put" race condition)
    - no blocking size limit possible
    - no consumer -> producer notifications (task_done/join API)
    Queue is on the critical path: any moment spent here increases client
    application wait for object data, transaction completion, etc.
    As we have a single consumer (client application's thread) and a single
    producer (lib.dispatcher, which can be called from several threads but
    serialises calls internally) for each queue, Queue.Queue's locking scheme
    can be relaxed to reduce latency.
    """
    __slots__ = ('_lock', '_unlock', '_popleft', '_append', '_queue')

    def __init__(self):
        # The lock is used as an event: the consumer blocks acquiring it
        # when the queue is empty, and put() releases it to wake him up.
        lock = Lock()
        self._lock = lock.acquire
        self._unlock = lock.release
        self._queue = queue = deque()
        self._popleft = queue.popleft
        self._append = queue.append

    def get(self, block):
        if block:
            # Make sure we hold the lock before possibly blocking on it
            # below; the non-blocking acquire result is deliberately
            # ignored (failure means it was already held).
            self._lock(False)
        while True:
            try:
                return self._popleft()
            except IndexError:
                if not block:
                    raise Empty
                # Queue empty: sleep until put() releases the lock.
                self._lock()

    def put(self, item):
        self._append(item)
        # Leave the lock released whether or not it was held, waking a
        # consumer possibly blocked in get().
        self._lock(False)
        self._unlock()

    def empty(self):
        return not self._queue
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/logger.py 0000664 0000000 0000000 00000023460 12601037530 0024261 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# WARNING: Log rotating should not be implemented here.
# SQLite does not access database only by file descriptor,
# and an OperationalError exception would be raised if a log is emitted
# between a rename and a reopen.
# Fortunately, SQLite allow multiple process to access the same DB,
# so an external tool should be able to dump and empty tables.
from collections import deque
from functools import wraps
from logging import getLogger, Formatter, Logger, StreamHandler, \
DEBUG, WARNING
from time import time
from traceback import format_exception
import bz2, inspect, neo, os, signal, sqlite3, sys, threading
# Stats for storage node of matrix test (py2.7:SQLite)
# Estimated in-memory overhead of one buffered record, used by
# NEOLogger.backlog()/_queue() to bound the size of the record queue.
RECORD_SIZE = ( 234360832 # extra memory used
              - 16777264 # sum of raw data ('msg' attribute)
              ) // 187509 # number of records

# Format of records printed to stderr by the default root handler.
FMT = ('%(asctime)s %(levelname)-9s %(name)-10s'
       ' [%(module)14s:%(lineno)3d] \n%(message)s')
class _Formatter(Formatter):
    """Formatter that repeats the record's header in front of every line
    of a multi-line message, and adds 0.1ms precision to timestamps."""

    def formatTime(self, record, datefmt=None):
        # Append sub-second precision: msecs * 10 gives 1/10000 s units.
        return Formatter.formatTime(self, record,
            '%Y-%m-%d %H:%M:%S') + '.%04d' % (record.msecs * 10)

    def format(self, record):
        # FMT ends its header with '\n', so the first formatted line is
        # the header (prefix); prepend it to every message line.
        # Use list indexing instead of the Python2-only .next() iterator
        # method: identical behaviour, and version-agnostic.
        lines = Formatter.format(self, record).splitlines()
        prefix = lines[0]
        return '\n'.join(prefix + line for line in lines[1:])
class PacketRecord(object):
    """Lightweight stand-in for logging.LogRecord, used by
    NEOLogger.packet() to buffer network packets.

    Instantiating it copies the keyword arguments straight into the
    instance: the __init__ property resolves, on the fresh instance, to
    its __dict__.update bound method, which type.__call__ then invokes
    with the constructor's keyword arguments.
    """
    # Attributes expected on any record by NEOLogger._queue()/_emit():
    args = None
    levelno = DEBUG
    __init__ = property(lambda self: self.__dict__.update)
class NEOLogger(Logger):
    """Logger that, in addition to console output, persists records and
    network packets into a SQLite database, buffering them in memory
    (the "backlog") to limit I/O.
    """

    default_root_handler = StreamHandler()
    default_root_handler.setFormatter(_Formatter(FMT))

    def __init__(self):
        Logger.__init__(self, None)
        self.parent = root = getLogger()
        if not root.handlers:
            root.addHandler(self.default_root_handler)
        self._db = None
        self._record_queue = deque()
        self._record_size = 0
        # Operations deferred while the lock is held (see __async).
        self._async = set()
        l = threading.Lock()
        self._acquire = l.acquire
        release = l.release
        def _release():
            try:
                # Run deferred operations before actually releasing.
                while self._async:
                    self._async.pop()(self)
            finally:
                release()
        self._release = _release
        self.backlog()

    def __enter__(self):
        self._acquire()
        return self._db

    def __exit__(self, t, v, tb):
        self._release()

    def __async(wrapped):
        # Make a method safe to call from signal handlers or other
        # threads: if the lock is busy, execution is deferred to the
        # current lock holder (performed in _release).
        def wrapper(self):
            self._async.add(wrapped)
            if self._acquire(0):
                self._release()
        return wraps(wrapped)(wrapper)

    @__async
    def reopen(self):
        # Reconnect to the database file (logrotate interoperability).
        if self._db is None:
            return
        q = self._db.execute
        if not q("SELECT id FROM packet LIMIT 1").fetchone():
            q("DROP TABLE protocol")
            # DROP TABLE already replaced previous data with zeros,
            # so VACUUM is not really useful. But here, it should be free.
            q("VACUUM")
        self._setup(q("PRAGMA database_list").fetchone()[2])

    @__async
    def flush(self):
        # Write every buffered record to the database.
        if self._db is None:
            return
        try:
            for r in self._record_queue:
                self._emit(r)
        finally:
            # Always commit, to not lose any record that we could emit.
            self.commit()
        self._record_queue.clear()
        self._record_size = 0

    def commit(self):
        try:
            self._db.commit()
        except sqlite3.OperationalError, e:
            x = e.args[0]
            if x == 'database is locked':
                # Another process holds the database: retry until free.
                sys.stderr.write('%s: retrying to emit log...' % x)
                while e.args[0] == x:
                    try:
                        self._db.commit()
                    except sqlite3.OperationalError, e:
                        continue
                    sys.stderr.write(' ok\n')
                    return
            raise

    def backlog(self, max_size=1<<24, max_packet=None):
        # Configure buffering: max_size is an approximate byte limit for
        # the in-memory queue (None means write-through), max_packet caps
        # the stored body size of network packets.
        with self:
            self._max_packet = max_packet
            self._max_size = max_size
            if max_size is None:
                self.flush()
            else:
                # Discard oldest records to fit the new size limit.
                q = self._record_queue
                while max_size < self._record_size:
                    self._record_size -= RECORD_SIZE + len(q.popleft().msg)

    def _setup(self, filename=None, reset=False):
        # (Re)open the SQLite backend, creating the schema if needed and
        # storing a bz2-compressed copy of protocol.py for later decoding
        # of the recorded packets.
        from . import protocol as p
        global uuid_str
        uuid_str = p.uuid_str
        if self._db is not None:
            self._db.close()
            if not filename:
                self._db = None
                self._record_queue.clear()
                self._record_size = 0
                return
        if filename:
            self._db = sqlite3.connect(filename, check_same_thread=False)
            q = self._db.execute
            if self._max_size is None:
                q("PRAGMA synchronous = OFF")
            if 1: # Not only when logging everything,
                  # but also for interoperability with logrotate.
                q("PRAGMA journal_mode = MEMORY")
            if reset:
                for t in 'log', 'packet':
                    q('DROP TABLE IF EXISTS ' + t)
            q("""CREATE TABLE IF NOT EXISTS log (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    date REAL NOT NULL,
                    name TEXT,
                    level INTEGER NOT NULL,
                    pathname TEXT,
                    lineno INTEGER,
                    msg TEXT)
              """)
            q("""CREATE INDEX IF NOT EXISTS _log_i1 ON log(date)""")
            q("""CREATE TABLE IF NOT EXISTS packet (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    date REAL NOT NULL,
                    name TEXT,
                    msg_id INTEGER NOT NULL,
                    code INTEGER NOT NULL,
                    peer TEXT NOT NULL,
                    body BLOB)
              """)
            q("""CREATE INDEX IF NOT EXISTS _packet_i1 ON packet(date)""")
            q("""CREATE TABLE IF NOT EXISTS protocol (
                    date REAL PRIMARY KEY NOT NULL,
                    text BLOB NOT NULL)
              """)
            with open(inspect.getsourcefile(p)) as p:
                p = buffer(bz2.compress(p.read()))
            # Only insert the protocol source if it changed since the
            # last recorded version.
            for t, in q("SELECT text FROM protocol ORDER BY date DESC"):
                if p == t:
                    break
            else:
                try:
                    t = self._record_queue[0].created
                except IndexError:
                    t = time()
                with self._db:
                    q("INSERT INTO protocol VALUES (?,?)", (t, p))

    def setup(self, filename=None, reset=False):
        with self:
            self._setup(filename, reset)
    # Closing the database on garbage collection == setup() with no args.
    __del__ = setup

    def isEnabledFor(self, level):
        # Process every record so it can be buffered for the database;
        # level filtering for console output happens in callHandlers.
        return True

    def _emit(self, r):
        # Write a single record to the appropriate table.
        if type(r) is PacketRecord:
            ip, port = r.addr
            peer = '%s %s (%s:%u)' % ('>' if r.outgoing else '<',
                                      uuid_str(r.uuid), ip, port)
            msg = r.msg
            if msg is not None:
                msg = buffer(msg)
            self._db.execute("INSERT INTO packet VALUES (NULL,?,?,?,?,?,?)",
                (r.created, r._name, r.msg_id, r.code, peer, msg))
        else:
            pathname = os.path.relpath(r.pathname, *neo.__path__)
            self._db.execute("INSERT INTO log VALUES (NULL,?,?,?,?,?,?)",
                (r.created, r._name, r.levelno, pathname, r.lineno, r.msg))

    def _queue(self, record):
        # Buffer the record, or write it through if backlog is disabled.
        record._name = self.name and str(self.name)
        self._acquire()
        try:
            if self._max_size is None:
                self._emit(record)
                self.commit()
            else:
                self._record_size += RECORD_SIZE + len(record.msg)
                q = self._record_queue
                q.append(record)
                if record.levelno < WARNING:
                    # Discard oldest records to stay within the limit.
                    while self._max_size < self._record_size:
                        self._record_size -= RECORD_SIZE + len(q.popleft().msg)
                else:
                    # Warnings and errors flush the whole backlog.
                    self.flush()
        finally:
            self._release()

    def callHandlers(self, record):
        if self._db is not None:
            # Pre-render the message (and traceback) so the record can be
            # stored after its arguments are released.
            record.msg = record.getMessage()
            record.args = None
            if record.exc_info:
                record.msg += '\n' + ''.join(
                    format_exception(*record.exc_info)).strip()
                record.exc_info = None
            self._queue(record)
        if Logger.isEnabledFor(self, record.levelno):
            record.name = self.name or 'NEO'
            self.parent.callHandlers(record)

    def packet(self, connection, packet, outgoing):
        # Record a network packet, dropping the body if it exceeds the
        # configured max_packet size.
        if self._db is not None:
            body = packet._body
            if self._max_packet and self._max_packet < len(body):
                body = None
            self._queue(PacketRecord(
                created=time(),
                msg_id=packet._id,
                code=packet._code,
                outgoing=outgoing,
                uuid=connection.getUUID(),
                addr=connection.getAddress(),
                msg=body))
# Process-wide singleton logger.
logging = NEOLogger()
# SIGRTMIN flushes the buffered records to the database;
# SIGRTMIN+1 reopens the database (for interoperability with logrotate).
signal.signal(signal.SIGRTMIN, lambda signum, frame: logging.flush())
signal.signal(signal.SIGRTMIN+1, lambda signum, frame: logging.reopen())
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/node.py 0000664 0000000 0000000 00000051425 12601037530 0023731 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from time import time
from os.path import exists, getsize
import json
from . import attributeTracker, logging
from .protocol import uuid_str, NodeTypes, NodeStates, ProtocolError
class Node(object):
"""This class represents a node."""
_connection = None
def __init__(self, manager, address=None, uuid=None,
        state=NodeStates.UNKNOWN):
    """Create a node and register it in the given node manager."""
    self._state = state
    self._address = address
    self._uuid = uuid
    self._manager = manager
    self._last_state_change = time()
    self._identified = False
    manager.add(self)
def notify(self, packet):
    """Send a notification packet through the node's connection."""
    assert self.isConnected(), 'Not connected'
    self._connection.notify(packet)
def ask(self, packet, *args, **kw):
    """Send a request packet through the node's connection."""
    assert self.isConnected(), 'Not connected'
    self._connection.ask(packet, *args, **kw)
def answer(self, packet, msg_id=None):
    """Send an answer packet through the node's connection."""
    assert self.isConnected(), 'Not connected'
    self._connection.answer(packet, msg_id)
def getLastStateChange(self):
    """Return the time() of the last state change."""
    return self._last_state_change
def getState(self):
    """Return the node's current state."""
    return self._state
def setState(self, new_state):
    """Change the node's state, keeping the manager's indexes in sync."""
    if self._state == new_state:
        return
    if new_state == NodeStates.DOWN:
        # A DOWN node is removed from the manager instead of reindexed.
        self._manager.remove(self)
        self._state = new_state
    else:
        old_state = self._state
        self._state = new_state
        self._manager._updateState(self, old_state)
    self._last_state_change = time()
def setAddress(self, address):
if self._address == address:
return
old_address = self._address
self._address = address
self._manager._updateAddress(self, old_address)
def getAddress(self):
return self._address
def setUUID(self, uuid):
if self._uuid == uuid:
return
old_uuid = self._uuid
self._uuid = uuid
self._manager._updateUUID(self, old_uuid)
self._manager._updateIdentified(self)
if self._connection is not None:
self._connection.setUUID(uuid)
def getUUID(self):
return self._uuid
def onConnectionClosed(self):
"""
Callback from node's connection when closed
"""
assert self._connection is not None
del self._connection
self._identified = False
self._manager._updateIdentified(self)
def setConnection(self, connection, force=None):
"""
Define the connection that is currently available to this node.
If there is already a connection set, 'force' must be given:
the new connection replaces the old one if it is true. In any case,
the node must be managed by the same handler for the client and
server parts.
"""
assert connection.getUUID() in (None, self._uuid), connection
connection.setUUID(self._uuid)
conn = self._connection
if conn is None:
self._connection = connection
if connection.isServer():
self.setIdentified()
else:
assert force is not None, \
attributeTracker.whoSet(self, '_connection')
# The test on peer_id is there to protect against buggy nodes.
# XXX: handler comparison does not cover all cases: there may
# be a pending handler change, which won't be detected, or a future
# handler change which is not prevented. Complete implementation
# should allow different handlers for each connection direction,
# with in-packets client/server indicators to decide which handler
# (server-ish or client-ish) to use. There is currently no need for
# the full-fledged functionality, and it is simpler this way.
if not force or conn.getPeerId() is not None or \
type(conn.getHandler()) is not type(connection.getHandler()):
raise ProtocolError("already connected")
def on_closed():
self._connection = connection
assert connection.isServer()
self.setIdentified()
conn.setOnClose(on_closed)
conn.close()
assert not connection.isClosed(), connection
connection.setOnClose(self.onConnectionClosed)
self._manager._updateIdentified(self)
def getConnection(self):
"""
Returns the connection to the node if available
"""
assert self._connection is not None
return self._connection
def isConnected(self, connecting=False):
"""
Returns True is a connection is established with the node
"""
return self._connection is not None and (connecting or
not self._connection.connecting)
def setIdentified(self):
assert self._connection is not None
self._identified = True
def isIdentified(self):
"""
Returns True if identification packets have been exchanged
"""
return self._identified
def __repr__(self):
return '<%s(uuid=%s, address=%s, state=%s, connection=%r) at %x>' % (
self.__class__.__name__,
uuid_str(self._uuid),
self._address,
self._state,
self._connection,
id(self),
)
def isMaster(self):
return False
def isStorage(self):
return False
def isClient(self):
return False
def isAdmin(self):
return False
def isRunning(self):
return self._state == NodeStates.RUNNING
def isUnknown(self):
return self._state == NodeStates.UNKNOWN
def isTemporarilyDown(self):
return self._state == NodeStates.TEMPORARILY_DOWN
def isDown(self):
return self._state == NodeStates.DOWN
def isBroken(self):
return self._state == NodeStates.BROKEN
def isHidden(self):
return self._state == NodeStates.HIDDEN
def isPending(self):
return self._state == NodeStates.PENDING
def setRunning(self):
self.setState(NodeStates.RUNNING)
def setUnknown(self):
self.setState(NodeStates.UNKNOWN)
def setTemporarilyDown(self):
self.setState(NodeStates.TEMPORARILY_DOWN)
def setDown(self):
self.setState(NodeStates.DOWN)
def setBroken(self):
self.setState(NodeStates.BROKEN)
def setHidden(self):
self.setState(NodeStates.HIDDEN)
def setPending(self):
self.setState(NodeStates.PENDING)
def asTuple(self):
""" Returned tuple is intented to be used in procotol encoders """
return (self.getType(), self._address, self._uuid, self._state)
def __gt__(self, node):
# sort per UUID if defined
if self._uuid is not None:
return self._uuid > node._uuid
return self._address > node._address
def getType(self):
try:
return NODE_CLASS_MAPPING[self.__class__]
except KeyError:
raise NotImplementedError
def whoSetState(self):
"""
Debugging method: call this method to know who set the current
state value.
"""
return attributeTracker.whoSet(self, '_state')
# Instrument Node so attribute assignments are recorded, enabling the
# debugging helpers whoSetState() / attributeTracker.whoSet() above.
attributeTracker.track(Node)
# Concrete node types: each one only overrides the corresponding
# is*() predicate of Node.

class MasterNode(Node):
    """This class represents a master node."""

    def isMaster(self):
        return True

class StorageNode(Node):
    """This class represents a storage node."""

    def isStorage(self):
        return True

class ClientNode(Node):
    """This class represents a client node."""

    def isClient(self):
        return True

class AdminNode(Node):
    """This class represents an admin node."""

    def isAdmin(self):
        return True
# NodeTypes enum item -> Node subclass, used when instantiating from
# protocol data (see NodeManager.createFromNodeType).
NODE_TYPE_MAPPING = {
    NodeTypes.MASTER: MasterNode,
    NodeTypes.STORAGE: StorageNode,
    NodeTypes.CLIENT: ClientNode,
    NodeTypes.ADMIN: AdminNode,
}
# Reverse mapping, used by Node.getType().
NODE_CLASS_MAPPING = {
    StorageNode: NodeTypes.STORAGE,
    MasterNode: NodeTypes.MASTER,
    ClientNode: NodeTypes.CLIENT,
    AdminNode: NodeTypes.ADMIN,
}
class MasterDB(object):
    """
    Manages accesses to master's address database.

    The database is a flat JSON file containing the list of known master
    addresses; the whole file is rewritten after each change.
    """

    def __init__(self, path):
        """
        path (string)
            Location of the database file. It is created (empty) when it
            does not exist yet or has a zero size.
        """
        self._path = path
        if exists(path) and getsize(path):
            # 'with' guarantees the descriptor is released even if the
            # file does not contain valid JSON (it used to leak).
            with open(path, 'r') as db:
                # JSON stores addresses as lists; convert back to the
                # (host, port) tuples used everywhere else.
                init_set = map(tuple, json.load(db))
        else:
            # Create/truncate the file so the path is known writable and
            # the next startup finds a valid (empty) database.
            with open(path, 'w+') as db:
                init_set = []
        self._set = set(init_set)

    def _save(self):
        """Rewrite the whole file; a failure to open is only logged."""
        try:
            db = open(self._path, 'w')
        except IOError:
            logging.warning('failed opening master database at %r '
                'for writing, update skipped', self._path)
        else:
            # Close the descriptor even if serialization fails.
            with db:
                json.dump(list(self._set), db)

    def add(self, addr):
        """Record an address and persist immediately."""
        self._set.add(addr)
        self._save()

    def discard(self, addr):
        """Forget an address (if present) and persist immediately."""
        self._set.discard(addr)
        self._save()

    def __iter__(self):
        return iter(self._set)
class NodeManager(object):
    """This class manages node status."""

    # MasterDB instance (or None) used to persist master addresses.
    _master_db = None

    # TODO: rework getXXXList() methods, filter first by node type
    # - getStorageList(identified=True, connected=True, )
    # - getList(...)

    def __init__(self, master_db=None):
        """
        master_db (string)
        Path to a file containing master nodes's addresses. Used to automate
        master list updates. If not provided, no automation will happen.
        """
        self._node_set = set()
        # Index dicts, kept in sync by the _update*/__drop* helpers below.
        self._address_dict = {}
        self._uuid_dict = {}
        self._type_dict = {}
        self._state_dict = {}
        self._identified_dict = {}
        if master_db is not None:
            self._master_db = db = MasterDB(master_db)
            for addr in db:
                self.createMaster(address=addr)

    # "Closing" just reinitializes all node indexes (a previously set
    # _master_db instance attribute is left untouched since the default
    # master_db argument is None).
    close = __init__

    def add(self, node):
        """Register a node in all indexes; called by Node.__init__."""
        if node in self._node_set:
            logging.warning('adding a known node %r, ignoring', node)
            return
        assert not node.isDown(), node
        self._node_set.add(node)
        self._updateAddress(node, None)
        self._updateUUID(node, None)
        self.__updateSet(self._type_dict, None, node.__class__, node)
        self.__updateSet(self._state_dict, None, node.getState(), node)
        self._updateIdentified(node)
        if node.isMaster() and self._master_db is not None:
            # Persist master addresses across restarts.
            self._master_db.add(node.getAddress())

    def remove(self, node):
        """Forget a node, dropping it from every index."""
        if node not in self._node_set:
            logging.warning('removing unknown node %r, ignoring', node)
            return
        self._node_set.remove(node)
        self.__drop(self._address_dict, node.getAddress())
        self.__drop(self._uuid_dict, node.getUUID())
        self.__dropSet(self._state_dict, node.getState(), node)
        self.__dropSet(self._type_dict, node.__class__, node)
        uuid = node.getUUID()
        if uuid in self._identified_dict:
            del self._identified_dict[uuid]
        if node.isMaster() and self._master_db is not None:
            self._master_db.discard(node.getAddress())

    def __drop(self, index_dict, key):
        try:
            del index_dict[key]
        except KeyError:
            # a node may have not be indexed by uuid or address, eg.:
            # - a master known by address but without UUID
            # - a client or admin node that don't have listening address
            pass

    def __update(self, index_dict, old_key, new_key, node):
        """ Update an index from old to new key """
        if old_key is not None:
            assert index_dict[old_key] is node, '%r is stored as %s, ' \
                'moving %r to %s' % (index_dict[old_key], old_key, node,
                new_key)
            del index_dict[old_key]
        if new_key is not None:
            assert index_dict.get(new_key, node) is node, 'Adding %r at %r ' \
                'would overwrite %r' % (node, new_key, index_dict[new_key])
            index_dict[new_key] = node

    def _updateIdentified(self, node):
        """Maintain the {uuid: node} index of connected, known nodes."""
        uuid = node.getUUID()
        if uuid:
            # XXX: It's probably a bug to include connecting nodes but there's
            # no API yet to update manager when connection is established.
            if node.isConnected(connecting=True):
                assert node in self._node_set, node
                self._identified_dict[uuid] = node
            else:
                self._identified_dict.pop(uuid, None)

    def _updateAddress(self, node, old_address):
        self.__update(self._address_dict, old_address, node.getAddress(), node)

    def _updateUUID(self, node, old_uuid):
        self.__update(self._uuid_dict, old_uuid, node.getUUID(), node)

    def __dropSet(self, set_dict, key, node):
        if key in set_dict and node in set_dict[key]:
            set_dict[key].remove(node)

    def __updateSet(self, set_dict, old_key, new_key, node):
        """ Update a set index from old to new key """
        if old_key in set_dict:
            set_dict[old_key].remove(node)
        if new_key is not None:
            set_dict.setdefault(new_key, set()).add(node)

    def _updateState(self, node, old_state):
        # DOWN nodes must go through remove(), not through this index.
        assert not node.isDown(), node
        self.__updateSet(self._state_dict, old_state, node.getState(), node)

    def getList(self, node_filter=None):
        """Return all known nodes, optionally filtered by a predicate."""
        if node_filter is None:
            return list(self._node_set)
        return filter(node_filter, self._node_set)

    def getIdentifiedList(self, pool_set=None):
        """
        Return identified nodes.
        pool_set is an iterable of UUIDs allowed
        """
        if pool_set is not None:
            identified_nodes = self._identified_dict.items()
            return [v for k, v in identified_nodes if k in pool_set]
        return self._identified_dict.values()

    def getConnectedList(self):
        """
        Return the list of currently connected nodes.
        """
        # TODO: use an index
        return [x for x in self._node_set if x.isConnected()]

    def __getList(self, index_dict, key):
        # setdefault so callers always get a (possibly empty) live set.
        return index_dict.setdefault(key, set())

    def getByStateList(self, state):
        """ Get a node list filtered per the node state """
        return list(self.__getList(self._state_dict, state))

    def __getTypeList(self, type_klass, only_identified=False):
        node_set = self.__getList(self._type_dict, type_klass)
        if only_identified:
            return [x for x in node_set if x.getUUID() in self._identified_dict]
        return list(node_set)

    def getMasterList(self, only_identified=False):
        """ Return a list with master nodes """
        return self.__getTypeList(MasterNode, only_identified)

    def getStorageList(self, only_identified=False):
        """ Return a list with storage nodes """
        return self.__getTypeList(StorageNode, only_identified)

    def getClientList(self, only_identified=False):
        """ Return a list with client nodes """
        return self.__getTypeList(ClientNode, only_identified)

    def getAdminList(self, only_identified=False):
        """ Return a list with admin nodes """
        return self.__getTypeList(AdminNode, only_identified)

    def getByAddress(self, address):
        """ Return the node that match with a given address """
        return self._address_dict.get(address, None)

    def getByUUID(self, uuid):
        """ Return the node that match with a given UUID """
        return self._uuid_dict.get(uuid, None)

    def hasAddress(self, address):
        return address in self._address_dict

    def hasUUID(self, uuid):
        return uuid in self._uuid_dict

    def _createNode(self, klass, address=None, uuid=None, **kw):
        """Create a node of the given class, or reuse the existing node
        matching the given address and/or uuid.

        Raises ValueError when address and uuid designate two different
        nodes, or when they conflict with the reused node's values.
        """
        by_address = self.getByAddress(address)
        by_uuid = self.getByUUID(uuid)
        if by_address is None and by_uuid is None:
            node = klass(self, address=address, uuid=uuid, **kw)
        else:
            if by_uuid is None or by_address is by_uuid:
                node = by_address
            elif by_address is None:
                node = by_uuid
            else:
                raise ValueError('Got different nodes for uuid %s: %r and '
                    'address %r: %r.' % (uuid_str(uuid), by_uuid, address,
                    by_address))
            if uuid is not None:
                node_uuid = node.getUUID()
                if node_uuid is None:
                    node.setUUID(uuid)
                elif node_uuid != uuid:
                    raise ValueError('Expected uuid %s on node %r' % (
                        uuid_str(uuid), node))
            if address is not None:
                node_address = node.getAddress()
                if node_address is None:
                    node.setAddress(address)
                elif node_address != address:
                    raise ValueError('Expected address %r on node %r' % (
                        address, node))
        assert node.__class__ is klass, (node.__class__, klass)
        return node

    def createMaster(self, **kw):
        """ Create and register a new master """
        return self._createNode(MasterNode, **kw)

    def createStorage(self, **kw):
        """ Create and register a new storage """
        return self._createNode(StorageNode, **kw)

    def createClient(self, **kw):
        """ Create and register a new client """
        return self._createNode(ClientNode, **kw)

    def createAdmin(self, **kw):
        """ Create and register a new admin """
        return self._createNode(AdminNode, **kw)

    def _getClassFromNodeType(self, node_type):
        """Map a NodeTypes item to a Node subclass; raise on unknown type."""
        klass = NODE_TYPE_MAPPING.get(node_type)
        if klass is None:
            raise ValueError('Unknown node type : %s' % node_type)
        return klass

    def createFromNodeType(self, node_type, **kw):
        return self._createNode(self._getClassFromNodeType(node_type), **kw)

    def update(self, node_list):
        """Apply a list of (type, address, uuid, state) tuples, creating,
        updating or dropping nodes accordingly."""
        for node_type, addr, uuid, state in node_list:
            # This should be done here (although klass might not be used in this
            # iteration), as it raises if type is not valid.
            klass = self._getClassFromNodeType(node_type)
            # lookup in current table
            node_by_uuid = self.getByUUID(uuid)
            node_by_addr = self.getByAddress(addr)
            node = node_by_uuid or node_by_addr
            log_args = node_type, uuid_str(uuid), addr, state
            if node is None:
                if state == NodeStates.DOWN:
                    logging.debug('NOT creating node %s %s %s %s', *log_args)
                else:
                    node = self._createNode(klass, address=addr, uuid=uuid,
                        state=state)
                    logging.debug('creating node %r', node)
            else:
                assert isinstance(node, klass), 'node %r is not ' \
                    'of expected type: %r' % (node, klass)
                assert None in (node_by_uuid, node_by_addr) or \
                    node_by_uuid is node_by_addr, \
                    'Discrepancy between node_by_uuid (%r) and ' \
                    'node_by_addr (%r)' % (node_by_uuid, node_by_addr)
                if state == NodeStates.DOWN:
                    logging.debug('droping node %r (%r), found with %s '
                        '%s %s %s', node, node.isConnected(), *log_args)
                    if node.isConnected():
                        # cut this connection, node removed by handler
                        node.getConnection().close()
                    self.remove(node)
                else:
                    logging.debug('updating node %r to %s %s %s %s',
                        node, *log_args)
                    node.setUUID(uuid)
                    node.setAddress(addr)
                    node.setState(state)
        self.log()

    def log(self):
        """Log a one-line summary per known node."""
        logging.info('Node manager : %u nodes', len(self._node_set))
        if self._node_set:
            node_list = [(node, uuid_str(node.getUUID()))
                         for node in sorted(self._node_set)]
            # Align the uuid column on the longest uuid string.
            max_len = max(len(x[1]) for x in node_list)
            for node, uuid in node_list:
                address = node.getAddress() or ''
                if address:
                    address = '%s:%d' % address
                logging.info(' * %*s | %8s | %22s | %s',
                    max_len, uuid, node.getType(), address, node.getState())
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/patch.py 0000664 0000000 0000000 00000005221 12601037530 0024074 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2015-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def speedupFileStorageTxnLookup():
    """Speed up lookup of start position when instantiating an iterator

    FileStorage does not index the file positions of transactions.
    With this patch, we use the existing {oid->file_pos} index to bisect
    the closest file position to start iterating.
    """
    from array import array
    from bisect import bisect
    from collections import defaultdict
    from ZODB.FileStorage.FileStorage import FileStorage, FileIterator
    # Pick an array typecode able to hold 32-bit values ('I' may be
    # smaller than 4 bytes on some platforms).
    typecode = 'L' if array('I').itemsize < 4 else 'I'

    class Start(object):
        # Comparison proxy for bisect(): 'self < l' reads the data header
        # at file position (h << 32 | l) and compares transaction ids, so
        # a low-order-word array can be searched without decoding every
        # entry first.
        def __init__(self, read_data_header, h, tid):
            self.read_data_header = read_data_header
            self.h = h << 32
            self.tid = tid
        def __lt__(self, l):
            return self.tid < self.read_data_header(self.h | l).tid

    def iterator(self, start=None, stop=None):
        if start:
            try:
                index = self._tidindex
            except AttributeError:
                # Cache a sorted list of all the file pos from oid index.
                # To reduce memory usage, the list is split in arrays of
                # low order 32-bit words.
                tindex = defaultdict(lambda: array(typecode))
                for x in self._index.itervalues():
                    tindex[x >> 32].append(x & 0xffffffff)
                index = self._tidindex = []
                for h, l in sorted(tindex.iteritems()):
                    # NOTE(review): 'I' is used here instead of 'typecode';
                    # presumably equivalent on supported platforms - confirm.
                    x = array('I')
                    x.fromlist(sorted(l))
                    l = self._read_data_header(h << 32 | x[0])
                    index.append((l.tid, h, x))
            # Find the last cached entry whose tid is <= start, then
            # bisect inside its 32-bit-word array for the exact position.
            x = bisect(index, (start,)) - 1
            if x >= 0:
                x, h, index = index[x]
                x = self._read_data_header
                h = x(h << 32 | index[bisect(index, Start(x, h, start)) - 1])
                return FileIterator(self._file_name, start, stop, h.tloc)
        return FileIterator(self._file_name, start, stop)
    # Monkey-patch FileStorage with the optimized iterator.
    FileStorage.iterator = iterator
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/protocol.py 0000664 0000000 0000000 00000131045 12601037530 0024642 0 ustar 00root root 0000000 0000000
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import sys
import traceback
from cStringIO import StringIO
from struct import Struct
PROTOCOL_VERSION = 3

# Size restrictions.
MIN_PACKET_SIZE = 10
MAX_PACKET_SIZE = 0x4000000

# (message id, message code, total length), big-endian - see Packet.encode().
PACKET_HEADER_FORMAT = Struct('!LHL')
# Check that header size is the expected value.
# If it is not, it means that struct module result is incompatible with
# "reference" platform (python 2.4 on x86-64).
assert PACKET_HEADER_FORMAT.size == 10, \
    'Unsupported platform, packet header length = %i' % \
    (PACKET_HEADER_FORMAT.size, )

# Bit set in the message code of answer packets (see Packet.isResponse()).
RESPONSE_MASK = 0x8000
class Enum(tuple):
    """
    Build an enumeration from a function whose body is just a sequence of
    bare names: each name becomes an Enum.Item (an int holding its
    position) and is set as an attribute of the resulting tuple.

    Usage: decorate a function with @Enum; the function itself is never
    called.
    """

    class Item(int):

        __slots__ = '_name', '_enum'

        def __str__(self):
            return self._name

        def __repr__(self):
            # Fixed: the format string was empty, which made repr() raise
            # TypeError ("not all arguments converted").
            return "<EnumItem %s (%d)>" % (self._name, self)

        def __eq__(self, other):
            if type(other) is self.__class__:
                # Items of distinct enums must never be compared.
                assert other._enum is self._enum
                return other is self
            return other == int(self)

    def __new__(cls, func):
        # The decorated function is never executed: its code object's
        # co_names lists, in order, the global names its body would load,
        # i.e. exactly the enum item names.
        # (__code__ is the modern spelling of func_code, valid since 2.6.)
        names = func.__code__.co_names
        self = tuple.__new__(cls, map(cls.Item, xrange(len(names))))
        self._name = func.__name__
        for item, name in zip(self, names):
            setattr(self, name, item)
            item._name = name
            item._enum = self
        return self

    def __repr__(self):
        # Fixed: the format string was empty here as well.
        return "<Enum %s>" % self._name
@Enum
def ErrorCodes():
    # Error codes carried by Error packets. Each bare name becomes an
    # integer Enum.Item whose value is its position here, so the order
    # below is part of the wire protocol and must not change.
    ACK
    NOT_READY
    OID_NOT_FOUND
    TID_NOT_FOUND
    OID_DOES_NOT_EXIST
    PROTOCOL_ERROR
    BROKEN_NODE
    ALREADY_PENDING
    REPLICATION_ERROR
    CHECKING_ERROR
    BACKEND_NOT_IMPLEMENTED
@Enum
def ClusterStates():
    # Global cluster states (order is part of the wire protocol).
    RECOVERING
    VERIFYING
    RUNNING
    STOPPING
    STARTING_BACKUP
    BACKINGUP
    STOPPING_BACKUP
@Enum
def NodeTypes():
    # Kinds of nodes composing a NEO cluster (order is part of the
    # wire protocol).
    MASTER
    STORAGE
    CLIENT
    ADMIN
@Enum
def NodeStates():
    # Per-node states, as tracked by NodeManager (order is part of the
    # wire protocol).
    RUNNING
    TEMPORARILY_DOWN
    DOWN
    BROKEN
    HIDDEN
    PENDING
    UNKNOWN
@Enum
def CellStates():
    # States of a partition-table cell (order is part of the wire
    # protocol).
    # Normal state: cell is writable/readable, and it isn't planned to drop it.
    UP_TO_DATE
    # Write-only cell. Last transactions are missing because storage is/was down
    # for a while, or because it is new for the partition. It usually becomes
    # UP_TO_DATE when replication is done.
    OUT_OF_DATE
    # Same as UP_TO_DATE, except that it will be discarded as soon as another
    # node finishes to replicate it. It means a partition is moved from 1 node
    # to another.
    FEEDING
    # Not really a state: only used in network packets to tell storages to drop
    # partitions.
    DISCARDED
    # A check revealed that data differs from other replicas. Cell is neither
    # readable nor writable.
    CORRUPTED
@Enum
def LockState():
    # Possible answers when asking whether a lock is held (order is part
    # of the wire protocol).
    NOT_LOCKED
    GRANTED
    GRANTED_TO_OTHER
# used for logging: single-letter abbreviation per node state
node_state_prefix_dict = {
    NodeStates.RUNNING: 'R',
    NodeStates.TEMPORARILY_DOWN: 'T',
    NodeStates.DOWN: 'D',
    NodeStates.BROKEN: 'B',
    NodeStates.HIDDEN: 'H',
    NodeStates.PENDING: 'P',
    NodeStates.UNKNOWN: 'U',
}
# used for logging: single-letter abbreviation per cell state
cell_state_prefix_dict = {
    CellStates.UP_TO_DATE: 'U',
    CellStates.OUT_OF_DATE: 'O',
    CellStates.FEEDING: 'F',
    CellStates.DISCARDED: 'D',
    CellStates.CORRUPTED: 'C',
}
# Other constants.
INVALID_UUID = 0
# TIDs/OIDs are 8-byte big-endian strings; all-ones means "invalid".
INVALID_TID = '\xff' * 8
INVALID_OID = '\xff' * 8
INVALID_PARTITION = 0xffffffff
INVALID_ADDRESS_TYPE = socket.AF_UNSPEC
ZERO_HASH = '\0' * 20
ZERO_TID = '\0' * 8
ZERO_OID = '\0' * 8
OID_LEN = len(INVALID_OID)
TID_LEN = len(INVALID_TID)
MAX_TID = '\x7f' + '\xff' * 7 # SQLite does not accept numbers above 2^63-1

# UUID layout. High-order byte:
# 7 6 5 4 3 2 1 0
# | | | | +-+-+-+-- reserved (0)
# | +-+-+---------- node type
# +---------------- temporary if negative
# UUID namespaces are required to prevent conflicts when the master generate
# new uuid before it knows uuid of existing storage nodes. So only the high
# order bit is really important and the 31 other bits could be random.
# Extra namespace information and non-randomness of 3 LOB help to read logs.
UUID_NAMESPACES = {
    NodeTypes.STORAGE: 0x00,
    NodeTypes.MASTER: -0x10,
    NodeTypes.CLIENT: -0x20,
    NodeTypes.ADMIN: -0x30,
}

# Human-readable uuid: first letter of the node type followed by the low
# 24 bits (e.g. "S1"), or str(uuid) for a false value. The outer lambda
# is called once, only to bind the {namespace: letter} dict.
uuid_str = (lambda ns: lambda uuid:
    ns[uuid >> 24] + str(uuid & 0xffffff) if uuid else str(uuid)
    )({v: str(k)[0] for k, v in UUID_NAMESPACES.iteritems()})
# Protocol-level exceptions; each docstring documents how the connection
# must be handled when the exception is raised.

class ProtocolError(Exception):
    """ Base class for protocol errors, close the connection """

class PacketMalformedError(ProtocolError):
    """ Close the connection and set the node as broken"""

class UnexpectedPacketError(ProtocolError):
    """ Close the connection and set the node as broken"""

class NotReadyError(ProtocolError):
    """ Just close the connection """

class BrokenNodeDisallowedError(ProtocolError):
    """ Just close the connection """

class BackendNotImplemented(Exception):
    """ Method not implemented by backend storage """
class Packet(object):
    """
    Base class for any packet definition. The _fmt class attribute must be
    defined for any non-empty packet.
    """
    # Class-level defaults, overridden per subclass / at registration
    # time (_code is assigned by the registry, hence the assert below).
    _ignore_when_closed = False
    _request = None
    _answer = None
    _body = None
    _code = None
    _fmt = None
    _id = None

    def __init__(self, *args, **kw):
        """Encode the given field values into _body immediately."""
        assert self._code is not None, "Packet class not registered"
        if args or kw:
            args = list(args)
            buf = StringIO()
            # load named arguments
            for item in self._fmt._items[len(args):]:
                args.append(kw.get(item._name))
            self._fmt.encode(buf.write, args)
            self._body = buf.getvalue()
        else:
            self._body = ''

    def decode(self):
        """Decode _body into a tuple of values, as described by _fmt.

        Raises PacketMalformedError when decoding fails.
        """
        assert self._body is not None
        if self._fmt is None:
            return ()
        buf = StringIO(self._body)
        try:
            return self._fmt.decode(buf.read)
        except ParseError, msg:
            name = self.__class__.__name__
            raise PacketMalformedError("%s fail (%s)" % (name, msg))

    def setContent(self, msg_id, body):
        """ Register the packet content for future decoding """
        self._id = msg_id
        self._body = body

    def setId(self, value):
        self._id = value

    def getId(self):
        assert self._id is not None, "No identifier applied on the packet"
        return self._id

    def encode(self):
        """ Encode a packet as a string to send it over the network """
        content = self._body
        length = PACKET_HEADER_FORMAT.size + len(content)
        return (PACKET_HEADER_FORMAT.pack(self._id, self._code, length), content)

    def __len__(self):
        return PACKET_HEADER_FORMAT.size + len(self._body)

    def __repr__(self):
        return '%s[%r]' % (self.__class__.__name__, self._id)

    def __eq__(self, other):
        """ Compare packets with their code instead of content """
        if other is None:
            return False
        assert isinstance(other, Packet)
        return self._code == other._code

    def isError(self):
        return isinstance(self, Error)

    def isResponse(self):
        # Answer packet codes have RESPONSE_MASK set.
        return self._code & RESPONSE_MASK == RESPONSE_MASK

    def getAnswerClass(self):
        return self._answer

    def ignoreOnClosedConnection(self):
        """
        Tells if this packet must be ignored when its connection is closed
        when it is handled.
        """
        return self._ignore_when_closed
class ParseError(Exception):
    """
    Wraps a lower-level decoding failure and accumulates, item by item,
    the 'path' of packet items leading to the error.
    """

    def __init__(self, item, trace):
        Exception.__init__(self)
        self._trace = trace
        self._items = [item]

    def append(self, item):
        self._items.append(item)

    def __repr__(self):
        path = []
        for failed_item in self._items:
            path.append(failed_item.getName())
        # Items were appended innermost-first: present them outermost-first.
        path.reverse()
        return 'at %s:\n%s' % ('/'.join(path), self._trace)

    __str__ = __repr__
# packet parsers

class PItem(object):
    """
    Base class for any packet item, _encode and _decode must be overriden
    by subclasses.
    """
    def __init__(self, name):
        self._name = name

    def __repr__(self):
        return self.__class__.__name__

    def getName(self):
        return self._name

    def _trace(self, method, *args):
        """Run an _encode/_decode, converting failures into ParseError
        and extending the item path of an already-raised ParseError."""
        try:
            return method(*args)
        except ParseError, e:
            # trace and forward exception
            e.append(self)
            raise
        except Exception:
            # original exception, encapsulate it
            trace = ''.join(traceback.format_exception(*sys.exc_info())[2:])
            raise ParseError(self, trace)

    def encode(self, writer, items):
        return self._trace(self._encode, writer, items)

    def decode(self, reader):
        return self._trace(self._decode, reader)

    def _encode(self, writer, items):
        raise NotImplementedError, self.__class__.__name__

    def _decode(self, reader):
        raise NotImplementedError, self.__class__.__name__
class PStruct(PItem):
    """
    A fixed sequence of heterogeneous sub-items, encoded one after the
    other and decoded back into a tuple.
    """
    def __init__(self, name, *items):
        PItem.__init__(self, name)
        self._items = items

    def _encode(self, writer, values):
        assert len(self._items) == len(values), (values, self._items)
        for sub_item, sub_value in zip(self._items, values):
            sub_item.encode(writer, sub_value)

    def _decode(self, reader):
        decoded = []
        for sub_item in self._items:
            decoded.append(sub_item.decode(reader))
        return tuple(decoded)
class PStructItem(PItem):
    """
    A single scalar value serialized through a pre-compiled struct
    format. The Struct methods are bound once at construction time, so
    subclasses can use self.pack/self.unpack/self.size directly.
    """
    def __init__(self, name, fmt):
        PItem.__init__(self, name)
        packer = Struct(fmt)
        self.pack = packer.pack
        self.unpack = packer.unpack
        self.size = packer.size

    def _encode(self, writer, value):
        writer(self.pack(value))

    def _decode(self, reader):
        raw = reader(self.size)
        return self.unpack(raw)[0]
class PList(PStructItem):
    """
    A list of homogeneous items
    """
    # Encoded as a 32-bit count followed by each item's encoding.
    def __init__(self, name, item):
        PStructItem.__init__(self, name, '!L')
        self._item = item

    def _encode(self, writer, items):
        writer(self.pack(len(items)))
        item = self._item
        for value in items:
            item.encode(writer, value)

    def _decode(self, reader):
        length = self.unpack(reader(self.size))[0]
        item = self._item
        return [item.decode(reader) for _ in xrange(length)]
class PDict(PStructItem):
    """
    A dictionary with custom key and value formats
    """
    # Encoded as a 32-bit entry count followed by alternating key/value
    # encodings. Iteration order is not deterministic, only the content.
    def __init__(self, name, key, value):
        PStructItem.__init__(self, name, '!L')
        self._key = key
        self._value = value

    def _encode(self, writer, item):
        assert isinstance(item , dict), (type(item), item)
        writer(self.pack(len(item)))
        key, value = self._key, self._value
        for k, v in item.iteritems():
            key.encode(writer, k)
            value.encode(writer, v)

    def _decode(self, reader):
        length = self.unpack(reader(self.size))[0]
        key, value = self._key, self._value
        new_dict = {}
        for _ in xrange(length):
            k = key.decode(reader)
            v = value.decode(reader)
            new_dict[k] = v
        return new_dict
class PEnum(PStructItem):
    """
    Encapsulate an enumeration value

    Encoded as a signed 32-bit integer: the item's index, or -1 for None.
    """
    def __init__(self, name, enum):
        PStructItem.__init__(self, name, '!l')
        self._enum = enum

    def _encode(self, writer, item):
        if item is None:
            item = -1
        else:
            assert isinstance(item, int), item
        writer(self.pack(item))

    def _decode(self, reader):
        code = self.unpack(reader(self.size))[0]
        if code == -1:
            return None
        try:
            return self._enum[code]
        # Fixed: Enum is a tuple subclass, so an out-of-range code raises
        # IndexError, which the original 'except KeyError' never caught.
        # KeyError is still listed in case a mapping is ever used.
        except (KeyError, IndexError):
            enum = self._enum.__class__.__name__
            raise ValueError('Invalid code for %s enum: %r' % (enum, code))
class PString(PStructItem):
    """
    A byte string prefixed with its 32-bit big-endian length.
    """
    def __init__(self, name):
        PStructItem.__init__(self, name, '!L')

    def _encode(self, writer, value):
        prefix = self.pack(len(value))
        writer(prefix)
        writer(value)

    def _decode(self, reader):
        # Read the length prefix, then exactly that many bytes.
        return reader(self.unpack(reader(self.size))[0])
class PAddress(PString):
    """
    An host address (IPv4/IPv6)
    """
    # Encoded as a PString host followed, when the host is not empty, by
    # a 16-bit port. A false address (None/empty) is encoded as ''.
    def __init__(self, name):
        PString.__init__(self, name)
        self._port = Struct('!H')

    def _encode(self, writer, address):
        if address:
            host, port = address
            PString._encode(self, writer, host)
            writer(self._port.pack(port))
        else:
            PString._encode(self, writer, '')

    def _decode(self, reader):
        host = PString._decode(self, reader)
        if host:
            p = self._port
            return host, p.unpack(reader(p.size))[0]
        # Implicitly returns None when the encoded host is empty.
class PBoolean(PStructItem):
    """
    A flag transported as one byte: 0 for False, 1 for True (any
    non-zero byte decodes to True).
    """
    def __init__(self, name):
        PStructItem.__init__(self, name, '!B')

    def _encode(self, writer, value):
        writer(self.pack(1 if value else 0))

    def _decode(self, reader):
        return self.unpack(reader(self.size))[0] != 0
class PNumber(PStructItem):
    """
    A integer number (4-bytes length)
    """
    # Unsigned 32-bit big-endian.
    def __init__(self, name):
        PStructItem.__init__(self, name, '!L')

class PIndex(PStructItem):
    """
    A big integer to defined indexes in a huge list.
    """
    # Unsigned 64-bit big-endian.
    def __init__(self, name):
        PStructItem.__init__(self, name, '!Q')
class PPTID(PStructItem):
    """
    Partition table id, transported as an unsigned 64-bit integer where
    0 is reserved to represent None (invalid ptid).
    """
    def __init__(self, name):
        PStructItem.__init__(self, name, '!Q')

    def _encode(self, writer, value):
        PStructItem._encode(self, writer, 0 if value is None else value)

    def _decode(self, reader):
        # A decoded 0 maps back to None.
        return PStructItem._decode(self, reader) or None
class PProtocol(PNumber):
    """
    The protocol version definition
    """
    def _encode(self, writer, version):
        writer(self.pack(version))

    def _decode(self, reader):
        # Note: unpack() returns a 1-tuple here, which is compared (and
        # returned) as such.
        version = self.unpack(reader(self.size))
        if version != (PROTOCOL_VERSION,):
            raise ProtocolError('protocol version mismatch')
        return version
class PChecksum(PItem):
    """
    A hash (SHA1)
    """
    # Fixed-size 20-byte raw digest, no length prefix.
    def _encode(self, writer, checksum):
        assert len(checksum) == 20, (len(checksum), checksum)
        writer(checksum)

    def _decode(self, reader):
        return reader(20)
class PUUID(PStructItem):
    """
    A node identifier: signed 32-bit integer, with 0 standing for None
    on the wire.
    """
    def __init__(self, name):
        PStructItem.__init__(self, name, '!l')

    def _encode(self, writer, uuid):
        if not uuid:
            uuid = 0
        writer(self.pack(uuid))

    def _decode(self, reader):
        value = self.unpack(reader(self.size))[0]
        return value or None
class PTID(PItem):
    """
    An 8-byte transaction identifier; None is transported as
    INVALID_TID.
    """
    def _encode(self, writer, tid):
        if tid is None:
            tid = INVALID_TID
        assert len(tid) == 8, (len(tid), tid)
        writer(tid)

    def _decode(self, reader):
        raw = reader(8)
        return None if raw == INVALID_TID else raw
# same definition, for now: OIDs use the same 8-byte encoding with the
# all-ones value meaning None
POID = PTID
# common definitions

# Empty payload (used e.g. as Ping's answer).
PFEmpty = PStruct('no_content')
PFNodeType = PEnum('type', NodeTypes)
PFNodeState = PEnum('state', NodeStates)
PFCellState = PEnum('state', CellStates)

# (type, address, uuid, state), matching Node.asTuple().
PFNodeList = PList('node_list',
    PStruct('node',
        PFNodeType,
        PAddress('address'),
        PUUID('uuid'),
        PFNodeState,
    ),
)
PFCellList = PList('cell_list',
    PStruct('cell',
        PUUID('uuid'),
        PFCellState,
    ),
)
# One row per partition offset, each holding its cell list.
PFRowList = PList('row_list',
    PStruct('row',
        PNumber('offset'),
        PFCellList,
    ),
)
PFHistoryList = PList('history_list',
    PStruct('history_entry',
        PTID('serial'),
        PNumber('size'),
    ),
)
PFUUIDList = PList('uuid_list',
    PUUID('uuid'),
)
PFTidList = PList('tid_list',
    PTID('tid'),
)
PFOidList = PList('oid_list',
    POID('oid'),
)
# packets definition

class Notify(Packet):
    """
    General purpose notification (remote logging)
    """
    _fmt = PStruct('notify',
        PString('message'),
    )

class Error(Packet):
    """
    Error is a special type of message, because this can be sent against
    any other message, even if such a message does not expect a reply
    usually. Any -> Any.
    """
    _fmt = PStruct('error',
        PNumber('code'),  # one of ErrorCodes
        PString('message'),
    )

class Ping(Packet):
    """
    Check if a peer is still alive. Any -> Any.
    """
    # No payload in either direction; receiving the empty answer is
    # proof of liveness.
    _answer = PFEmpty

class CloseClient(Packet):
    """
    Tell peer it can close the connection if it has finished with us. Any -> Any
    """
class RequestIdentification(Packet):
    """
    Request a node identification. This must be the first packet for any
    connection. Any -> Any.
    """
    _fmt = PStruct('request_identification',
        PProtocol('protocol_version'),
        PFNodeType,
        PUUID('uuid'),
        PAddress('address'),
        PString('name'),
    )

    _answer = PStruct('accept_identification',
        PFNodeType,
        PUUID('my_uuid'),
        PNumber('num_partitions'),
        PNumber('num_replicas'),
        PUUID('your_uuid'),
        PAddress('primary'),
        PList('known_master_list',
            PStruct('master',
                PAddress('address'),
                PUUID('uuid'),
            ),
        ),
    )

    def __init__(self, *args, **kw):
        # Callers never pass the protocol version themselves: it is
        # prepended here so every identification request announces it.
        if args or kw:
            # always announce current protocol version
            args = list(args)
            args.insert(0, PROTOCOL_VERSION)
        super(RequestIdentification, self).__init__(*args, **kw)

    def decode(self):
        # Drop the protocol-version field so callers see only the
        # arguments they passed in (presumably PProtocol validates the
        # version during decoding -- confirm in its definition).
        return super(RequestIdentification, self).decode()[1:]
class PrimaryMaster(Packet):
"""
Ask current primary master's uuid. CTL -> A.
"""
_answer = PStruct('answer_primary',
PUUID('primary_uuid'),
)
class AnnouncePrimary(Packet):
"""
Announce a primary master node election. PM -> SM.
"""
class ReelectPrimary(Packet):
"""
Force a re-election of a primary master node. M -> M.
"""
class LastIDs(Packet):
"""
Ask the last OID, the last TID and the last Partition Table ID so that
a master recover. PM -> S, S -> PM.
"""
_answer = PStruct('answer_last_ids',
POID('last_oid'),
PTID('last_tid'),
PPTID('last_ptid'),
PTID('backup_tid'),
)
class PartitionTable(Packet):
"""
Ask the full partition table. PM -> S.
Answer rows in a partition table. S -> PM.
"""
_answer = PStruct('answer_partition_table',
PPTID('ptid'),
PFRowList,
)
class NotifyPartitionTable(Packet):
"""
Send rows in a partition table to update other nodes. PM -> S, C.
"""
_fmt = PStruct('send_partition_table',
PPTID('ptid'),
PFRowList,
)
class PartitionChanges(Packet):
"""
Notify a subset of a partition table. This is used to notify changes.
PM -> S, C.
"""
_fmt = PStruct('notify_partition_changes',
PPTID('ptid'),
PList('cell_list',
PStruct('cell',
PNumber('offset'),
PUUID('uuid'),
PFCellState,
),
),
)
class StartOperation(Packet):
"""
Tell a storage nodes to start an operation. Until a storage node receives
this message, it must not serve client nodes. PM -> S.
"""
_fmt = PStruct('start_operation',
PBoolean('backup'),
)
class StopOperation(Packet):
"""
Tell a storage node to stop an operation. Once a storage node receives
this message, it must not serve client nodes. PM -> S.
"""
class UnfinishedTransactions(Packet):
"""
Ask unfinished transactions PM -> S.
Answer unfinished transactions S -> PM.
"""
_answer = PStruct('answer_unfinished_transactions',
PTID('max_tid'),
PList('tid_list',
PTID('unfinished_tid'),
),
)
class ObjectPresent(Packet):
"""
Ask if an object is present. If not present, OID_NOT_FOUND should be
returned. PM -> S.
Answer that an object is present. PM -> S.
"""
_fmt = PStruct('object_present',
POID('oid'),
PTID('tid'),
)
_answer = PStruct('object_present',
POID('oid'),
PTID('tid'),
)
class DeleteTransaction(Packet):
"""
Delete a transaction. PM -> S.
"""
_fmt = PStruct('delete_transaction',
PTID('tid'),
PFOidList,
)
class CommitTransaction(Packet):
"""
Commit a transaction. PM -> S.
"""
_fmt = PStruct('commit_transaction',
PTID('tid'),
)
class BeginTransaction(Packet):
"""
Ask to begin a new transaction. C -> PM.
Answer when a transaction begin, give a TID if necessary. PM -> C.
"""
_fmt = PStruct('ask_begin_transaction',
PTID('tid'),
)
_answer = PStruct('answer_begin_transaction',
PTID('tid'),
)
class FinishTransaction(Packet):
    """
    Finish a transaction. C -> PM.
    Answer when a transaction is finished. PM -> C.
    """
    _fmt = PStruct('ask_finish_transaction',
        PTID('tid'),
        PFOidList,
    )

    # NOTE(review): the label below duplicates LockInformation's answer
    # label ('answer_information_locked'); labels are descriptive only,
    # the wire layout (ttid, tid) is what matters.
    _answer = PStruct('answer_information_locked',
        PTID('ttid'),
        PTID('tid'),
    )
class NotifyTransactionFinished(Packet):
"""
Notify that a transaction blocking a replication is now finished
M -> S
"""
_fmt = PStruct('notify_transaction_finished',
PTID('ttid'),
PTID('max_tid'),
)
class LockInformation(Packet):
"""
Lock information on a transaction. PM -> S.
Notify information on a transaction locked. S -> PM.
"""
_fmt = PStruct('ask_lock_informations',
PTID('ttid'),
PTID('tid'),
PFOidList,
)
_answer = PStruct('answer_information_locked',
PTID('tid'),
)
class InvalidateObjects(Packet):
    """
    Invalidate objects. PM -> C.
    """
    # Fix: the struct was labeled 'ask_finish_transaction', a copy-paste
    # from FinishTransaction. The label is descriptive only (it does not
    # affect the wire layout: a TID followed by an OID list), so renaming
    # it is safe and removes a misleading name from logs/docs.
    _fmt = PStruct('invalidate_objects',
        PTID('tid'),
        PFOidList,
    )
class UnlockInformation(Packet):
"""
Unlock information on a transaction. PM -> S.
"""
_fmt = PStruct('notify_unlock_information',
PTID('tid'),
)
class GenerateOIDs(Packet):
"""
Ask new object IDs. C -> PM.
Answer new object IDs. PM -> C.
"""
_fmt = PStruct('ask_new_oids',
PNumber('num_oids'),
)
_answer = PStruct('answer_new_oids',
PFOidList,
)
class StoreObject(Packet):
"""
Ask to store an object. Send an OID, an original serial, a current
transaction ID, and data. C -> S.
Answer if an object has been stored. If an object is in conflict,
a serial of the conflicting transaction is returned. In this case,
if this serial is newer than the current transaction ID, a client
node must not try to resolve the conflict. S -> C.
"""
_fmt = PStruct('ask_store_object',
POID('oid'),
PTID('serial'),
PBoolean('compression'),
PChecksum('checksum'),
PString('data'),
PTID('data_serial'),
PTID('tid'),
PBoolean('unlock'),
)
_answer = PStruct('answer_store_object',
PBoolean('conflicting'),
POID('oid'),
PTID('serial'),
)
class AbortTransaction(Packet):
"""
Abort a transaction. C -> S, PM.
"""
_fmt = PStruct('abort_transaction',
PTID('tid'),
)
class StoreTransaction(Packet):
"""
Ask to store a transaction. C -> S.
Answer if transaction has been stored. S -> C.
"""
_fmt = PStruct('ask_store_transaction',
PTID('tid'),
PString('user'),
PString('description'),
PString('extension'),
PFOidList,
)
_answer = PStruct('answer_store_transaction',
PTID('tid'),
)
class GetObject(Packet):
"""
Ask a stored object by its OID and a serial or a TID if given. If a serial
is specified, the specified revision of an object will be returned. If
a TID is specified, an object right before the TID will be returned. C -> S.
Answer the requested object. S -> C.
"""
_fmt = PStruct('ask_object',
POID('oid'),
PTID('serial'),
PTID('tid'),
)
_answer = PStruct('answer_object',
POID('oid'),
PTID('serial_start'),
PTID('serial_end'),
PBoolean('compression'),
PChecksum('checksum'),
PString('data'),
PTID('data_serial'),
)
class TIDList(Packet):
"""
Ask for TIDs between a range of offsets. The order of TIDs is descending,
and the range is [first, last). C -> S.
Answer the requested TIDs. S -> C.
"""
_fmt = PStruct('ask_tids',
PIndex('first'),
PIndex('last'),
PNumber('partition'),
)
_answer = PStruct('answer_tids',
PFTidList,
)
class TIDListFrom(Packet):
"""
Ask for length TIDs starting at min_tid. The order of TIDs is ascending.
C -> S.
Answer the requested TIDs. S -> C
"""
_fmt = PStruct('tid_list_from',
PTID('min_tid'),
PTID('max_tid'),
PNumber('length'),
PNumber('partition'),
)
_answer = PStruct('answer_tids',
PFTidList,
)
class TransactionInformation(Packet):
"""
Ask information about a transaction. Any -> S.
Answer information (user, description) about a transaction. S -> Any.
"""
_fmt = PStruct('ask_transaction_information',
PTID('tid'),
)
_answer = PStruct('answer_transaction_information',
PTID('tid'),
PString('user'),
PString('description'),
PString('extension'),
PBoolean('packed'),
PFOidList,
)
class ObjectHistory(Packet):
"""
Ask history information for a given object. The order of serials is
descending, and the range is [first, last]. C -> S.
Answer history information (serial, size) for an object. S -> C.
"""
_fmt = PStruct('ask_object_history',
POID('oid'),
PIndex('first'),
PIndex('last'),
)
_answer = PStruct('answer_object_history',
POID('oid'),
PFHistoryList,
)
class PartitionList(Packet):
"""
All the following messages are for neoctl to admin node
Ask information about partition
Answer information about partition
"""
_fmt = PStruct('ask_partition_list',
PNumber('min_offset'),
PNumber('max_offset'),
PUUID('uuid'),
)
_answer = PStruct('answer_partition_list',
PPTID('ptid'),
PFRowList,
)
class NodeList(Packet):
"""
Ask information about nodes
Answer information about nodes
"""
_fmt = PStruct('ask_node_list',
PFNodeType,
)
_answer = PStruct('answer_node_list',
PFNodeList,
)
class SetNodeState(Packet):
"""
Set the node state
"""
_fmt = PStruct('set_node_state',
PUUID('uuid'),
PFNodeState,
)
_answer = Error
class AddPendingNodes(Packet):
"""
Ask the primary to include some pending node in the partition table
"""
_fmt = PStruct('add_pending_nodes',
PFUUIDList,
)
_answer = Error
class TweakPartitionTable(Packet):
"""
Ask the primary to optimize the partition table. A -> PM.
"""
_fmt = PStruct('tweak_partition_table',
PFUUIDList,
)
_answer = Error
class NotifyNodeInformation(Packet):
"""
Notify information about one or more nodes. PM -> Any.
"""
_fmt = PStruct('notify_node_informations',
PFNodeList,
)
class NodeInformation(Packet):
"""
Ask node information
"""
_answer = PFEmpty
class SetClusterState(Packet):
"""
Set the cluster state
"""
_fmt = PStruct('set_cluster_state',
PEnum('state', ClusterStates),
)
_answer = Error
class ClusterInformation(Packet):
"""
Notify information about the cluster
"""
_fmt = PStruct('notify_cluster_information',
PEnum('state', ClusterStates),
)
class ClusterState(Packet):
"""
Ask state of the cluster
Answer state of the cluster
"""
_answer = PStruct('answer_cluster_state',
PEnum('state', ClusterStates),
)
class ObjectUndoSerial(Packet):
"""
Ask storage the serial where object data is when undoing given transaction,
for a list of OIDs.
C -> S
Answer serials at which object data is when undoing a given transaction.
object_tid_dict has the following format:
key: oid
value: 3-tuple
current_serial (TID)
The latest serial visible to the undoing transaction.
undo_serial (TID)
Where undone data is (tid at which data is before given undo).
is_current (bool)
If current_serial's data is current on storage.
S -> C
"""
_fmt = PStruct('ask_undo_transaction',
PTID('tid'),
PTID('ltid'),
PTID('undone_tid'),
PFOidList,
)
_answer = PStruct('answer_undo_transaction',
PDict('object_tid_dict',
POID('oid'),
PStruct('object_tid_value',
PTID('current_serial'),
PTID('undo_serial'),
PBoolean('is_current'),
),
),
)
class HasLock(Packet):
    """
    Ask a storage if oid is locked by another transaction.
    C -> S
    Answer whether a transaction holds the write lock for requested object.
    """
    # NOTE(review): 'has_load_lock' looks like a typo for 'has_lock';
    # the label is descriptive only, so it is left untouched here.
    _fmt = PStruct('has_load_lock',
        PTID('tid'),
        POID('oid'),
    )

    _answer = PStruct('answer_has_lock',
        POID('oid'),
        PEnum('lock_state', LockState),
    )
class CheckCurrentSerial(Packet):
"""
Verifies if given serial is current for object oid in the database, and
take a write lock on it (so that this state is not altered until
transaction ends).
Answer to AskCheckCurrentSerial.
Same structure as AnswerStoreObject, to handle the same way, except there
is nothing to invalidate in any client's cache.
"""
_fmt = PStruct('ask_check_current_serial',
PTID('tid'),
PTID('serial'),
POID('oid'),
)
_answer = PStruct('answer_store_object',
PBoolean('conflicting'),
POID('oid'),
PTID('serial'),
)
class Pack(Packet):
"""
Request a pack at given TID.
C -> M
M -> S
Inform that packing it over.
S -> M
M -> C
"""
_fmt = PStruct('ask_pack',
PTID('tid'),
)
_answer = PStruct('answer_pack',
PBoolean('status'),
)
class CheckReplicas(Packet):
"""
ctl -> A
A -> M
"""
_fmt = PStruct('check_replicas',
PDict('partition_dict',
PNumber('partition'),
PUUID('source'),
),
PTID('min_tid'),
PTID('max_tid'),
)
_answer = Error
class CheckPartition(Packet):
"""
M -> S
"""
_fmt = PStruct('check_partition',
PNumber('partition'),
PStruct('source',
PString('upstream_name'),
PAddress('address'),
),
PTID('min_tid'),
PTID('max_tid'),
)
class CheckTIDRange(Packet):
"""
Ask some stats about a range of transactions.
Used to know if there are differences between a replicating node and
reference node.
S -> S
Stats about a range of transactions.
Used to know if there are differences between a replicating node and
reference node.
S -> S
"""
_fmt = PStruct('ask_check_tid_range',
PNumber('partition'),
PNumber('length'),
PTID('min_tid'),
PTID('max_tid'),
)
_answer = PStruct('answer_check_tid_range',
PNumber('count'),
PChecksum('checksum'),
PTID('max_tid'),
)
class CheckSerialRange(Packet):
"""
Ask some stats about a range of object history.
Used to know if there are differences between a replicating node and
reference node.
S -> S
Stats about a range of object history.
Used to know if there are differences between a replicating node and
reference node.
S -> S
"""
_fmt = PStruct('ask_check_serial_range',
PNumber('partition'),
PNumber('length'),
PTID('min_tid'),
PTID('max_tid'),
POID('min_oid'),
)
_answer = PStruct('answer_check_serial_range',
PNumber('count'),
PChecksum('tid_checksum'),
PTID('max_tid'),
PChecksum('oid_checksum'),
POID('max_oid'),
)
class PartitionCorrupted(Packet):
"""
S -> M
"""
_fmt = PStruct('partition_corrupted',
PNumber('partition'),
PList('cell_list',
PUUID('uuid'),
),
)
class LastTransaction(Packet):
"""
Ask last committed TID.
C -> M
Answer last committed TID.
M -> C
"""
_answer = PStruct('answer_last_transaction',
PTID('tid'),
)
class NotifyReady(Packet):
"""
Notify that node is ready to serve requests.
S -> M
"""
pass
# replication
class FetchTransactions(Packet):
"""
S -> S
"""
_fmt = PStruct('ask_transaction_list',
PNumber('partition'),
PNumber('length'),
PTID('min_tid'),
PTID('max_tid'),
PFTidList, # already known transactions
)
_answer = PStruct('answer_transaction_list',
PTID('pack_tid'),
PTID('next_tid'),
PFTidList, # transactions to delete
)
class AddTransaction(Packet):
"""
S -> S
"""
_fmt = PStruct('add_transaction',
PTID('tid'),
PString('user'),
PString('description'),
PString('extension'),
PBoolean('packed'),
PTID('ttid'),
PFOidList,
)
class FetchObjects(Packet):
"""
S -> S
"""
_fmt = PStruct('ask_object_list',
PNumber('partition'),
PNumber('length'),
PTID('min_tid'),
PTID('max_tid'),
POID('min_oid'),
PDict('object_dict', # already known objects
PTID('serial'),
PFOidList,
),
)
_answer = PStruct('answer_object_list',
PTID('pack_tid'),
PTID('next_tid'),
POID('next_oid'),
PDict('object_dict', # objects to delete
PTID('serial'),
PFOidList,
),
)
class AddObject(Packet):
"""
S -> S
"""
_fmt = PStruct('add_object',
POID('oid'),
PTID('serial'),
PBoolean('compression'),
PChecksum('checksum'),
PString('data'),
PTID('data_serial'),
)
class Replicate(Packet):
"""
Notify a storage node to replicate partitions up to given 'tid'
and from given sources.
M -> S
- upstream_name: replicate from an upstream cluster
- address: address of the source storage node, or None if there's no new
data up to 'tid' for the given partition
"""
_fmt = PStruct('replicate',
PTID('tid'),
PString('upstream_name'),
PDict('source_dict',
PNumber('partition'),
PAddress('address'),
)
)
class ReplicationDone(Packet):
"""
Notify the master node that a partition has been successully replicated from
a storage to another.
S -> M
"""
_fmt = PStruct('notify_replication_done',
PNumber('offset'),
PTID('tid'),
)
class Truncate(Packet):
"""
XXX: Used for both make storage consistent and leave backup mode
M -> S
"""
_fmt = PStruct('truncate',
PTID('tid'),
)
# Maps packet code -> packet class, filled by register() below.
StaticRegistry = {}

def register(request, ignore_when_closed=None):
    """ Register a packet in the packet registry """
    # Codes are allocated sequentially: the next free code is simply the
    # current size of the registry, so registration ORDER defines the
    # wire protocol.
    code = len(StaticRegistry)
    if request is Error:
        # Error is only ever sent as a reply, so its code carries the
        # response flag from the start.
        code |= RESPONSE_MASK
    # register the request
    StaticRegistry[code] = request
    if request is None:
        return # None registered only to skip a code number (for compatibility)
    request._code = code
    answer = request._answer
    if ignore_when_closed is None:
        # By default, on a closed connection:
        # - request: ignore
        # - answer: keep
        # - notification: keep
        ignore_when_closed = answer is not None
    request._ignore_when_closed = ignore_when_closed
    if answer in (Error, None):
        # no dedicated answer class to build: return the request alone
        return request
    # build a class for the answer
    answer = type('Answer%s' % (request.__name__, ), (Packet, ), {})
    answer._fmt = request._answer
    # compute the answer code
    code = code | RESPONSE_MASK
    answer._request = request
    assert answer._code is None, "Answer of %s is already used" % (request, )
    answer._code = code
    request._answer = answer
    # and register the answer packet
    assert code not in StaticRegistry, "Duplicate response packet code"
    StaticRegistry[code] = answer
    return (request, answer)
class ParserState(object):
    """
    Holder for the parser's partially-read packet.

    Treated as an opaque datatype everywhere except PacketRegistry.parse,
    which stores a (msg_id, packet_class, remaining_length) tuple here
    between calls when a packet arrives in several pieces.
    """
    payload = None

    def set(self, payload):
        self.payload = payload

    def get(self):
        return self.payload

    def clear(self):
        self.payload = None
class Packets(dict):
    """
    Packet registry that check packet code unicity and provide an index
    """
    def __metaclass__(name, base, d):
        # Python 2 metaclass trick: this function is invoked instead of
        # 'type' with (name, bases, dict) when the class statement runs.
        # Derive each packet's handler method name from the attribute it
        # is bound to (e.g. 'AskObject' -> 'askObject').
        for k, v in d.iteritems():
            if isinstance(v, type) and issubclass(v, Packet):
                v.handler_method_name = k[0].lower() + k[1:]
        # this builds a "singleton": the class statement evaluates to an
        # *instance* of the registry, pre-filled with StaticRegistry.
        return type('PacketRegistry', base, d)(StaticRegistry)

    def parse(self, buf, state_container):
        # Pull one complete packet out of 'buf', or return None if not
        # enough bytes arrived yet. Partial progress (header read but
        # payload incomplete) is saved in 'state_container' for the next
        # call.
        state = state_container.get()
        if state is None:
            # No packet in progress: read a fresh header first.
            header = buf.read(PACKET_HEADER_FORMAT.size)
            if header is None:
                return None
            msg_id, msg_type, msg_len = PACKET_HEADER_FORMAT.unpack(header)
            try:
                packet_klass = self[msg_type]
            except KeyError:
                raise PacketMalformedError('Unknown packet type')
            if msg_len > MAX_PACKET_SIZE:
                raise PacketMalformedError('message too big (%d)' % msg_len)
            if msg_len < MIN_PACKET_SIZE:
                raise PacketMalformedError('message too small (%d)' % msg_len)
            # msg_len includes the header: keep only the payload size.
            msg_len -= PACKET_HEADER_FORMAT.size
        else:
            # Resume a packet whose header was read on a previous call.
            msg_id, packet_klass, msg_len = state
        data = buf.read(msg_len)
        if data is None:
            # Not enough.
            if state is None:
                state_container.set((msg_id, packet_klass, msg_len))
            return None
        if state:
            state_container.clear()
        packet = packet_klass()
        packet.setContent(msg_id, data)
        return packet
# notifications
# Each register() call allocates the next sequential packet code; when the
# packet defines an answer format, register() returns (request, answer)
# and both classes are bound here. The ORDER of these statements is part
# of the wire protocol: never reorder or insert in the middle.
Error = register(Error)
RequestIdentification, AcceptIdentification = register(RequestIdentification)
# Code of RequestIdentification packet must never change so that 2
# incompatible nodes can reject themselves gracefully (i.e. comparing
# protocol versions) instead of raising PacketMalformedError.
assert RequestIdentification._code == 1
Ping, Pong = register(Ping)
CloseClient = register(CloseClient)
Notify = register(Notify)
AskPrimary, AnswerPrimary = register(PrimaryMaster)
AnnouncePrimary = register(AnnouncePrimary)
ReelectPrimary = register(ReelectPrimary)
NotifyNodeInformation = register(NotifyNodeInformation)
AskLastIDs, AnswerLastIDs = register(LastIDs)
AskPartitionTable, AnswerPartitionTable = register(PartitionTable)
SendPartitionTable = register(NotifyPartitionTable)
NotifyPartitionChanges = register(PartitionChanges)
StartOperation = register(StartOperation)
StopOperation = register(StopOperation)
AskUnfinishedTransactions, AnswerUnfinishedTransactions = register(UnfinishedTransactions)
AskObjectPresent, AnswerObjectPresent = register(ObjectPresent)
DeleteTransaction = register(DeleteTransaction)
CommitTransaction = register(CommitTransaction)
AskBeginTransaction, AnswerBeginTransaction = register(BeginTransaction)
AskFinishTransaction, AnswerTransactionFinished = register(FinishTransaction, ignore_when_closed=False)
AskLockInformation, AnswerInformationLocked = register(LockInformation, ignore_when_closed=False)
InvalidateObjects = register(InvalidateObjects)
NotifyUnlockInformation = register(UnlockInformation)
AskNewOIDs, AnswerNewOIDs = register(GenerateOIDs)
AskStoreObject, AnswerStoreObject = register(StoreObject)
AbortTransaction = register(AbortTransaction)
AskStoreTransaction, AnswerStoreTransaction = register(StoreTransaction)
AskObject, AnswerObject = register(GetObject)
AskTIDs, AnswerTIDs = register(TIDList)
AskTransactionInformation, AnswerTransactionInformation = register(TransactionInformation)
AskObjectHistory, AnswerObjectHistory = register(ObjectHistory)
AskPartitionList, AnswerPartitionList = register(PartitionList)
AskNodeList, AnswerNodeList = register(NodeList)
SetNodeState = register(SetNodeState, ignore_when_closed=False)
AddPendingNodes = register(AddPendingNodes, ignore_when_closed=False)
TweakPartitionTable = register(TweakPartitionTable, ignore_when_closed=False)
AskNodeInformation, AnswerNodeInformation = register(NodeInformation)
SetClusterState = register(SetClusterState, ignore_when_closed=False)
NotifyClusterInformation = register(ClusterInformation)
AskClusterState, AnswerClusterState = register(ClusterState)
AskObjectUndoSerial, AnswerObjectUndoSerial = register(ObjectUndoSerial)
AskHasLock, AnswerHasLock = register(HasLock)
AskTIDsFrom, AnswerTIDsFrom = register(TIDListFrom)
AskPack, AnswerPack = register(Pack, ignore_when_closed=False)
CheckReplicas = register(CheckReplicas)
CheckPartition = register(CheckPartition)
AskCheckTIDRange, AnswerCheckTIDRange = register(CheckTIDRange)
AskCheckSerialRange, AnswerCheckSerialRange = register(CheckSerialRange)
NotifyPartitionCorrupted = register(PartitionCorrupted)
NotifyReady = register(NotifyReady)
AskLastTransaction, AnswerLastTransaction = register(LastTransaction)
AskCheckCurrentSerial, AnswerCheckCurrentSerial = register(CheckCurrentSerial)
NotifyTransactionFinished = register(NotifyTransactionFinished)
Replicate = register(Replicate)
NotifyReplicationDone = register(ReplicationDone)
AskFetchTransactions, AnswerFetchTransactions = register(FetchTransactions)
AskFetchObjects, AnswerFetchObjects = register(FetchObjects)
AddTransaction = register(AddTransaction)
AddObject = register(AddObject)
Truncate = register(Truncate)
def Errors():
    """Build the singleton error-packet factory registry.

    For each ErrorCodes member, a method named after the code (CamelCase)
    is added that builds an Error packet with that code; the registry's
    dict content maps each numeric code to its handler method name
    (mixedCase).
    """
    methods = {}
    handler_names = {}

    def make_factory(code):
        # Bind 'code' now to avoid the late-binding closure pitfall.
        def factory(self, message=''):
            return Error(code, message)
        return factory

    for error in ErrorCodes:
        code = int(error)
        camel = ''.join(word.capitalize() for word in str(error).split('_'))
        methods[camel] = make_factory(code)
        handler_names[code] = camel[0].lower() + camel[1:]
    registry_class = type('ErrorRegistry', (dict,), methods)
    return registry_class(handler_names)
Errors = Errors()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/pt.py 0000664 0000000 0000000 00000026441 12601037530 0023427 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import math
from functools import wraps
from . import logging, protocol
from .protocol import uuid_str, CellStates
from .util import u64
from .locking import RLock
class PartitionTableException(Exception):
    """
    Base class for partition table exceptions
    (e.g. raised by setCell when a node is in an invalid state).
    """
class Cell(object):
    """This class represents a cell in a partition table."""

    def __init__(self, node, state = CellStates.UP_TO_DATE):
        self.node = node
        self.state = state

    def __repr__(self):
        # Fix: the format string had been reduced to "" (its text was lost
        # between angle brackets during extraction), so "" % (a, b, c)
        # raised TypeError. Restore a descriptive representation.
        return "<Cell(uuid=%s, address=%s, state=%s)>" % (
            uuid_str(self.getUUID()),
            self.getAddress(),
            self.getState(),
        )

    def getState(self):
        return self.state

    def setState(self, state):
        # DISCARDED cells are removed from the table, never stored.
        assert state != CellStates.DISCARDED
        self.state = state

    def isUpToDate(self):
        return self.state == CellStates.UP_TO_DATE

    def isOutOfDate(self):
        return self.state == CellStates.OUT_OF_DATE

    def isFeeding(self):
        return self.state == CellStates.FEEDING

    def isCorrupted(self):
        return self.state == CellStates.CORRUPTED

    def isReadable(self):
        # Only up-to-date and feeding cells hold data safe to read.
        return self.state == CellStates.UP_TO_DATE or \
               self.state == CellStates.FEEDING

    def getNode(self):
        return self.node

    def getNodeState(self):
        """This is a short hand."""
        return self.node.getState()

    def getUUID(self):
        return self.node.getUUID()

    def getAddress(self):
        return self.node.getAddress()
class PartitionTable(object):
    """This class manages a partition table."""

    def __init__(self, num_partitions, num_replicas):
        self._id = None  # last known partition table ID (PTID)
        self.np = num_partitions
        self.nr = num_replicas
        self.num_filled_rows = 0
        # Note: don't use [[]] * num_partition construct, as it duplicates
        # instance *references*, so the outer list contains really just one
        # inner list instance.
        self.partition_list = [[] for _ in xrange(num_partitions)]
        # node -> number of non-feeding cells assigned to it
        self.count_dict = {}

    def getID(self):
        return self._id

    def getPartitions(self):
        return self.np

    def getReplicas(self):
        return self.nr

    def clear(self):
        """Forget an existing partition table."""
        self._id = None
        self.num_filled_rows = 0
        # Note: don't use [[]] * self.np construct, as it duplicates
        # instance *references*, so the outer list contains really just one
        # inner list instance.
        self.partition_list = [[] for _ in xrange(self.np)]
        self.count_dict.clear()

    def getAssignedPartitionList(self, uuid):
        """ Return the partition assigned to the specified UUID """
        assigned_partitions = []
        for offset in xrange(self.np):
            for cell in self.getCellList(offset, readable=True):
                if cell.getUUID() == uuid:
                    assigned_partitions.append(offset)
                    break  # one readable cell is enough for this offset
        return assigned_partitions

    def hasOffset(self, offset):
        # True if at least one cell is assigned at this offset.
        try:
            return len(self.partition_list[offset]) > 0
        except IndexError:
            return False

    def getNodeSet(self, readable=False):
        # Set of nodes owning at least one (readable, if requested) cell.
        if readable:
            return {x.getNode() for row in self.partition_list for x in row
                        if x.isReadable()}
        return {x.getNode() for row in self.partition_list for x in row}

    def getConnectedNodeList(self):
        return [node for node in self.getNodeSet() if node.isConnected()]

    def getCellList(self, offset, readable=False):
        # Always return a fresh list so callers may mutate it freely.
        if readable:
            return filter(Cell.isReadable, self.partition_list[offset])
        return list(self.partition_list[offset])

    def getPartition(self, oid_or_tid):
        # Map an 8-byte OID/TID to its partition number.
        return u64(oid_or_tid) % self.getPartitions()

    def getOutdatedOffsetListFor(self, uuid):
        # Offsets where the given node holds an OUT_OF_DATE cell.
        return [
            offset for offset in xrange(self.np)
                for c in self.partition_list[offset]
                    if c.getUUID() == uuid and c.getState() == CellStates.OUT_OF_DATE
        ]

    def isAssigned(self, oid, uuid):
        """ Check if the oid is assigned to the given node """
        for cell in self.partition_list[u64(oid) % self.np]:
            if cell.getUUID() == uuid:
                return True
        return False

    def getCell(self, offset, uuid):
        # Return the cell of the given node at offset, or None (implicit).
        for cell in self.partition_list[offset]:
            if cell.getUUID() == uuid:
                return cell

    def setCell(self, offset, node, state):
        # Assign or update a cell; return the change as (offset, uuid, state).
        # Raises PartitionTableException for broken/down nodes.
        if state == CellStates.DISCARDED:
            return self.removeCell(offset, node)
        if node.isBroken() or node.isDown():
            raise PartitionTableException('Invalid node state')
        self.count_dict.setdefault(node, 0)
        for cell in self.partition_list[offset]:
            if cell.getNode() is node:
                # Update an existing cell; feeding cells are not counted.
                if not cell.isFeeding():
                    self.count_dict[node] -= 1
                cell.setState(state)
                break
        else:
            row = self.partition_list[offset]
            self.num_filled_rows += not row  # row was empty until now
            row.append(Cell(node, state))
        if state != CellStates.FEEDING:
            self.count_dict[node] += 1
        return offset, node.getUUID(), state

    def removeCell(self, offset, node):
        # Drop the node's cell at offset; return the DISCARDED change tuple
        # (even when no cell was found).
        row = self.partition_list[offset]
        for cell in row:
            if cell.getNode() == node:
                row.remove(cell)
                self.num_filled_rows -= not row  # row may have become empty
                if not cell.isFeeding():
                    self.count_dict[node] -= 1
                break
        return (offset, node.getUUID(), CellStates.DISCARDED)

    def load(self, ptid, row_list, nm):
        """
        Load the partition table with the specified PTID, discard all previous
        content.
        """
        self.clear()
        self._id = ptid
        for offset, row in row_list:
            if offset >= self.getPartitions():
                raise IndexError
            for uuid, state in row:
                node = nm.getByUUID(uuid)
                # the node must be known by the node manager
                assert node is not None
                self.setCell(offset, node, state)
        logging.debug('partition table loaded (ptid=%s)', ptid)
        self.log()

    def update(self, ptid, cell_list, nm):
        """
        Update the partition with the cell list supplied. Ignore those changes
        if the partition table ID is not greater than the current one. If a node
        is not known, it is created in the node manager and set as unavailable
        """
        # NOTE(review): the docstring claims unknown nodes are created, but
        # the code below asserts the node is already known -- confirm which
        # is intended.
        if ptid <= self._id:
            logging.warning('ignoring older partition changes')
            return
        self._id = ptid
        for offset, uuid, state in cell_list:
            node = nm.getByUUID(uuid)
            assert node is not None, 'No node found for uuid ' + uuid_str(uuid)
            self.setCell(offset, node, state)
        logging.debug('partition table updated (ptid=%s)', ptid)
        self.log()

    def filled(self):
        # True when every partition has at least one cell.
        return self.num_filled_rows == self.np

    def log(self):
        logging.debug(self.format())

    def format(self):
        return '\n'.join(self._format())

    def _format(self):
        """Help debugging partition table management.
        Output sample:
        pt: node 0: 67ae354b4ed240a0594d042cf5c01b28, R
        pt: node 1: a68a01e8bf93e287bd505201c1405bc2, R
        pt: node 2: ad7ffe8ceef4468a0c776f3035c7a543, R
        pt: node 3: df57d7298678996705cd0092d84580f4, R
        pt: 00: .UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.
        pt: 11: U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U
        Here, there are 4 nodes in RUNNING state.
        The first partition has 2 replicas in UP_TO_DATE state, on nodes 1 and
        2 (nodes 0 and 3 are displayed as unused for that partition by
        displaying a dot).
        The first number on the left represents the number of the first
        partition on the line (here, line length is 11 to keep the docstring
        width under 80 column).
        """
        node_list = sorted(self.count_dict)
        result = ['pt: node %u: %s, %s' % (i, uuid_str(node.getUUID()),
                      protocol.node_state_prefix_dict[node.getState()])
                  for i, node in enumerate(node_list)]
        append = result.append
        line = []
        max_line_len = 20 # XXX: hardcoded number of partitions per line
        cell_state_dict = protocol.cell_state_prefix_dict
        prefix = 0
        prefix_len = int(math.ceil(math.log10(self.np)))
        for offset, row in enumerate(self.partition_list):
            if len(line) == max_line_len:
                # Flush a full line of partitions.
                append('pt: %0*u: %s' % (prefix_len, prefix, '|'.join(line)))
                line = []
                prefix = offset
            if row is None:
                line.append('X' * len(node_list))
            else:
                cell_dict = {x.getNode(): cell_state_dict[x.getState()]
                             for x in row}
                line.append(''.join(cell_dict.get(x, '.') for x in node_list))
        if line:
            append('pt: %0*u: %s' % (prefix_len, prefix, '|'.join(line)))
        return result

    def operational(self):
        # True when every partition has at least one readable cell on a
        # running node.
        if not self.filled():
            return False
        for row in self.partition_list:
            for cell in row:
                if cell.isReadable() and cell.getNode().isRunning():
                    break
            else:
                # no usable cell for this partition
                return False
        return True

    def getRow(self, offset):
        # Serialize one row as a list of (uuid, state) pairs.
        row = self.partition_list[offset]
        if row is None:
            return []
        return [(cell.getUUID(), cell.getState()) for cell in row]

    def getRowList(self):
        getRow = self.getRow
        return [(x, getRow(x)) for x in xrange(self.np)]
def thread_safe(method):
    """Wrap *method* so that it runs between self.lock() and self.unlock().

    The lock is released even if the method raises.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        self.lock()
        try:
            return method(self, *args, **kwargs)
        finally:
            self.unlock()
    return wrapper
class MTPartitionTable(PartitionTable):
    """ Thread-safe aware version of the partition table, override only methods
    used in the client """

    def __init__(self, *args, **kwargs):
        # Create the lock first so the wrapped methods can use it as soon
        # as the table exists.
        self._lock = RLock()
        PartitionTable.__init__(self, *args, **kwargs)

    def lock(self):
        self._lock.acquire()

    def unlock(self):
        self._lock.release()

    @thread_safe
    def setCell(self, *args, **kwargs):
        return PartitionTable.setCell(self, *args, **kwargs)

    @thread_safe
    def clear(self, *args, **kwargs):
        return PartitionTable.clear(self, *args, **kwargs)

    @thread_safe
    def operational(self, *args, **kwargs):
        return PartitionTable.operational(self, *args, **kwargs)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/threaded_app.py 0000664 0000000 0000000 00000012402 12601037530 0025414 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import threading, weakref
from . import logging
from .app import BaseApplication
from .connection import ConnectionClosed
from .debug import register as registerLiveDebugger
from .dispatcher import Dispatcher, ForgottenPacket
from .locking import SimpleQueue
from .protocol import Packets
class app_set(weakref.WeakSet):
    """Weak set of live ThreadedApplication instances.

    on_log is registered as a live-debugger callback so that a debug
    signal dumps the state of every application still referenced.
    """
    def on_log(self):
        for application in self:
            application.log()
# Replace the class by its module-level singleton and register its on_log
# callback so a live-debug request logs every application still alive.
app_set = app_set()
registerLiveDebugger(app_set.on_log)
class ThreadContainer(threading.local):
    """Per-thread state of a ThreadedApplication.

    Each thread gets its own reply queue and a slot holding the answer
    set by the last handled packet.
    """
    def __init__(self):
        self.answer = None
        self.queue = SimpleQueue()
class ThreadedApplication(BaseApplication):
    """The client node application.

    All network I/O happens in a dedicated poll thread; calling threads
    block on a per-thread queue (see ThreadContainer) until their answer
    has been dispatched.
    """

    def __init__(self, master_nodes, name, **kw):
        super(ThreadedApplication, self).__init__(**kw)
        # Daemon thread: it must never prevent interpreter exit.
        self.poll_thread = threading.Thread(target=self.run, name=name)
        self.poll_thread.daemon = True
        # Internal Attributes common to all thread
        self.name = name
        self.dispatcher = Dispatcher()
        self.master_conn = None
        # load master node list
        for address in master_nodes:
            self.nm.createMaster(address=address)
        # no self-assigned UUID, primary master will supply us one
        self.uuid = None
        # Internal attribute distinct between thread
        self._thread_container = ThreadContainer()
        app_set.add(self) # to register self.on_log

    def close(self):
        """Close connections; defer full cleanup to the poll thread."""
        # Clear all connection
        self.master_conn = None
        if self.poll_thread.is_alive():
            for conn in self.em.getConnectionList():
                conn.close()
            # Stop polling thread
            logging.debug('Stopping %s', self.poll_thread)
            self.em.wakeup(True)
        else:
            super(ThreadedApplication, self).close()

    def start(self):
        # Idempotent: only start the poll thread if not already running.
        self.poll_thread.is_alive() or self.poll_thread.start()

    def run(self):
        """Body of the poll thread; cleans up when the loop exits."""
        logging.debug("Started %s", self.poll_thread)
        try:
            self._run()
        finally:
            super(ThreadedApplication, self).close()
            logging.debug("Poll thread stopped")

    def _run(self):
        # Poll forever; log and retry on any unexpected error so a single
        # bad packet does not kill the I/O thread.
        poll = self.em.poll
        while 1:
            try:
                while 1:
                    poll(1)
            except Exception:
                self.log()
                logging.error("poll raised, retrying", exc_info=1)

    def getHandlerData(self):
        # Answer stored by the last handled packet, for the current thread.
        return self._thread_container.answer

    def setHandlerData(self, data):
        self._thread_container.answer = data

    def log(self):
        self.em.log()
        self.nm.log()
        # 'pt' may not be set yet, hence the __dict__ lookup.
        pt = self.__dict__.get('pt')
        if pt is not None:
            pt.log()

    def _handlePacket(self, conn, packet, kw={}, handler=None):
        """
        conn
            The connection which received the packet (forwarded to handler).
        packet
            The packet to handle.
        handler
            The handler to use to handle packet.
            If not given, it will be guessed from connection's node type.
        """
        if handler is None:
            # Guess the handler to use based on the type of node on the
            # connection
            node = self.nm.getByAddress(conn.getAddress())
            if node is None:
                raise ValueError, 'Expecting an answer from a node ' \
                    'which type is not known... Is this right ?'
            if node.isStorage():
                handler = self.storage_handler
            elif node.isMaster():
                handler = self.primary_handler
            else:
                raise ValueError, 'Unknown node type: %r' % (node.__class__, )
        with conn.lock:
            handler.dispatch(conn, packet, kw)

    def _ask(self, conn, packet, handler=None, **kw):
        """Send a request and block until its answer has been handled.

        Packets for other requests arriving in the meantime are also
        dispatched (unless they were forgotten).
        """
        self.setHandlerData(None)
        queue = self._thread_container.queue
        msg_id = conn.ask(packet, queue=queue, **kw)
        get = queue.get
        _handlePacket = self._handlePacket
        while True:
            qconn, qpacket, kw = get(True)
            is_forgotten = isinstance(qpacket, ForgottenPacket)
            if conn is qconn:
                # check fake packet
                if qpacket is None:
                    raise ConnectionClosed
                if msg_id == qpacket.getId():
                    if is_forgotten:
                        raise ValueError, 'ForgottenPacket for an ' \
                            'explicitely expected packet.'
                    _handlePacket(qconn, qpacket, kw, handler)
                    break
            if not is_forgotten and qpacket is not None:
                _handlePacket(qconn, qpacket, kw)
        return self.getHandlerData()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/lib/util.py 0000664 0000000 0000000 00000015650 12601037530 0023761 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import socket
from binascii import a2b_hex, b2a_hex
from datetime import timedelta, datetime
from hashlib import sha1
from Queue import deque
from struct import pack, unpack
from time import gmtime
# TID format: 64 bits = 32 high bits of packed (year, month, day, hour,
# minute) — see TID_CHUNK_RULES and packTID — plus 32 low bits of seconds
# scaled so that one minute spans the whole 2**32 range.
TID_LOW_OVERFLOW = 2**32
TID_LOW_MAX = TID_LOW_OVERFLOW - 1
# Seconds represented by one unit of the low 32 bits.
SECOND_PER_TID_LOW = 60.0 / TID_LOW_OVERFLOW
# (offset, modulus) for each packed field; modulus 0 means "no wrap"
# (the year, which is the highest chunk).
TID_CHUNK_RULES = (
    (-1900, 0),
    (-1, 12),
    (-1, 31),
    (0, 24),
    (0, 60),
)
def tidFromTime(tm):
    """Convert a Unix timestamp into a 64-bit packed TID."""
    t = gmtime(tm)
    # Keep the sub-second fraction of *tm* when scaling the seconds.
    seconds = t.tm_sec + (tm - int(tm))
    return packTID((t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min),
                   int(seconds / SECOND_PER_TID_LOW))
def packTID(higher, lower):
    """
    higher: a 5-tuple containing year, month, day, hour and minute
    lower: seconds scaled to 60:2**32 into a 64 bits TID
    """
    assert len(higher) == len(TID_CHUNK_RULES), higher
    packed = 0
    for value, (offset, multiplicator) in zip(higher, TID_CHUNK_RULES):
        assert isinstance(value, (int, long)), value
        value += offset
        assert 0 <= value, (value, offset, multiplicator)
        assert multiplicator == 0 or value < multiplicator, (value,
            offset, multiplicator)
        packed = packed * multiplicator + value
    # If the machine is configured in such way that gmtime() returns leap
    # seconds (e.g. TZ=right/UTC), then the best we can do is to use
    # TID_LOW_MAX, because TID format was not designed to support them.
    # For more information about leap seconds on Unix, see:
    # http://en.wikipedia.org/wiki/Unix_time
    # http://www.madore.org/~david/computers/unix-leap-seconds.html
    return pack('!LL', packed, min(lower, TID_LOW_MAX))
def unpackTID(ptid):
    """
    Unpack given 64 bits TID in to a 2-tuple containing:
    - a 5-tuple containing year, month, day, hour and minute
    - seconds scaled to 60:2**32
    """
    packed, lower = unpack('!LL', ptid)
    # Peel fields off from the least significant chunk (minute) upwards.
    fields = []
    for offset, multiplicator in reversed(TID_CHUNK_RULES):
        if multiplicator:
            packed, value = divmod(packed, multiplicator)
        else:
            # modulus 0: the remaining value is the whole (year) chunk
            packed, value = 0, packed
        fields.append(value - offset)
    fields.reverse()
    return (tuple(fields), lower)
def addTID(ptid, offset):
    """
    Offset given packed TID.
    """
    higher, lower = unpackTID(ptid)
    carry, lower = divmod(lower + offset, TID_LOW_OVERFLOW)
    if carry:
        # Low part overflowed (or underflowed): carry whole minutes into
        # the calendar part, letting datetime handle day/month wrapping.
        d = datetime(*higher) + timedelta(minutes=carry)
        higher = (d.year, d.month, d.day, d.hour, d.minute)
    return packTID(higher, lower)
def u64(s):
    """Unpack an 8-byte big-endian string into an unsigned integer."""
    (value,) = unpack('!Q', s)
    return value
def p64(n):
    """Pack an unsigned integer into an 8-byte big-endian string."""
    return pack('!Q', n)
def add64(packed, offset):
    """Add a python number to a 64-bits packed value"""
    return p64(offset + u64(packed))
def dump(s):
    """Dump a binary string in hex."""
    if s is None:
        return None
    if isinstance(s, str):
        return b2a_hex(s)
    # Non-string values (e.g. already-decoded objects) fall back to repr.
    return repr(s)
def bin(s):
    """Inverse of dump method."""
    # NOTE: shadows the builtin bin() in this module (kept for
    # backward compatibility with existing callers).
    return None if s is None else a2b_hex(s)
def makeChecksum(s):
    """Return a 20-byte checksum against a string.

    SHA-1 digests are always 20 bytes long.
    """
    return sha1(s).digest()
def parseNodeAddress(address, port_opt=None):
    """Parse 'host', 'host:port' or '[ipv6]:port' into a (host, port) tuple.

    *port_opt* is the fallback port when the address carries none.
    The address is resolved (maybe) and cast to canonical form through
    getaddrinfo.
    XXX: Always pick the first result. This might not be what is desired, and
    if so this function should either take a hint on the desired address type
    or return either raw host & port or getaddrinfo return value.
    """
    if address[:1] == '[':
        # bracketed IPv6, optionally followed by :port
        host, port = address[1:].split(']')
        port = port[1:] if port[:1] == ':' else port_opt
    elif address.count(':') == 1:
        host, port = address.split(':')
    else:
        host, port = address, port_opt
    return socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)[0][4][:2]
def parseMasterList(masters, except_node=None):
    """Parse a whitespace-separated list of master addresses.

    Each entry goes through parseNodeAddress; addresses equal to
    *except_node* (an already-parsed address) are skipped.
    """
    assert masters, 'At least one master must be defined'
    return [address
        for address in map(parseNodeAddress, masters.split())
        if address != except_node]
class ReadBuffer(object):
    """
    Lazy buffer of received chunks.

    The main purpose is to reduce useless copies of data: chunks are
    stored as-is and only joined when the requested size is available.
    TODO: For better performance, use:
      - socket.recv_into (64kiB blocks)
      - struct.unpack_from
      - and a circular buffer of dynamic size (initial size:
        twice the length passed to socket.recv_into ?)
    """

    def __init__(self):
        self.size = 0
        self.content = deque()

    def append(self, data):
        """Append some data and compute the new buffer size"""
        self.content.append(data)
        self.size += len(data)

    def __len__(self):
        """Return the current buffer size"""
        return self.size

    def read(self, size):
        """Read and consume *size* bytes, or return None if not enough."""
        if size > self.size:
            return None
        self.size -= size
        chunks = []
        missing = size
        # consume whole chunks until the requested size is reached
        while missing > 0:
            chunk = self.content.popleft()
            missing -= len(chunk)
            chunks.append(chunk)
        if missing < 0:
            # the last chunk overshoots: keep its tail for a later read
            tail = chunks[-1]
            chunks[-1] = tail[:missing]
            self.content.appendleft(tail[missing:])
        # join all chunks (one copy)
        data = ''.join(chunks)
        assert len(data) == size
        return data

    def clear(self):
        """Erase all buffer content"""
        self.size = 0
        self.content.clear()
class cached_property(object):
    """
    A property that is only computed once per instance and then replaces itself
    with an ordinary attribute. Deleting the attribute resets the property.
    """

    def __init__(self, func):
        self.__doc__ = func.__doc__
        self.func = func

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        # Compute once, then shadow the descriptor with an instance
        # attribute so later accesses bypass __get__ entirely.
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/ 0000775 0000000 0000000 00000000000 12601037530 0023150 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0025247 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/app.py 0000664 0000000 0000000 00000052032 12601037530 0024304 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import sys, weakref
from time import time
from neo.lib import logging
from neo.lib.app import BaseApplication
from neo.lib.debug import register as registerLiveDebugger
from neo.lib.protocol import uuid_str, UUID_NAMESPACES, ZERO_TID
from neo.lib.protocol import ClusterStates, NodeStates, NodeTypes, Packets
from neo.lib.handler import EventHandler
from neo.lib.connection import ListeningConnection, ClientConnection
from neo.lib.exception import ElectionFailure, PrimaryFailure, OperationFailure
class StateChangedException(Exception): pass
from .backup_app import BackupApplication
from .handlers import election, identification, secondary
from .handlers import administration, client, storage
from .pt import PartitionTable
from .recovery import RecoveryManager
from .transactions import TransactionManager
from .verification import VerificationManager
class Application(BaseApplication):
    """The master node application."""
    packing = None
    # Latest completely commited TID
    last_transaction = ZERO_TID
    backup_tid = None
    backup_app = None
    uuid = None

    def __init__(self, config):
        super(Application, self).__init__(config.getDynamicMasterList())
        self.tm = TransactionManager(self.onTransactionCommitted)
        self.name = config.getCluster()
        self.server = config.getBind()
        self.autostart = config.getAutostart()
        # UUIDs of storage nodes currently ready to serve requests.
        self.storage_readiness = set()
        for master_address in config.getMasters():
            self.nm.createMaster(address=master_address)
        logging.debug('IP address is %s, port is %d', *self.server)
        # Partition table
        replicas, partitions = config.getReplicas(), config.getPartitions()
        if replicas < 0:
            raise RuntimeError, 'replicas must be a positive integer'
        if partitions <= 0:
            raise RuntimeError, 'partitions must be more than zero'
        self.pt = PartitionTable(partitions, replicas)
        logging.info('Configuration:')
        logging.info('Partitions: %d', partitions)
        logging.info('Replicas : %d', replicas)
        logging.info('Name : %s', self.name)
        self.listening_conn = None
        self.primary = None
        self.primary_master_node = None
        self.cluster_state = None
        self._startup_allowed = False
        uuid = config.getUUID()
        if uuid:
            self.uuid = uuid
        # election related data
        self.unconnected_master_node_set = set()
        self.negotiating_master_node_set = set()
        self.master_address_dict = weakref.WeakKeyDictionary()
        self._current_manager = None
        # backup
        upstream_cluster = config.getUpstreamCluster()
        if upstream_cluster:
            if upstream_cluster == self.name:
                raise ValueError("upstream cluster name must be"
                    " different from cluster name")
            self.backup_app = BackupApplication(self, upstream_cluster,
                config.getUpstreamMasters())
        self.administration_handler = administration.AdministrationHandler(
            self)
        self.secondary_master_handler = secondary.SecondaryMasterHandler(self)
        self.client_service_handler = client.ClientServiceHandler(self)
        self.storage_service_handler = storage.StorageServiceHandler(self)
        registerLiveDebugger(on_log=self.log)

    def close(self):
        self.listening_conn = None
        if self.backup_app is not None:
            self.backup_app.close()
        super(Application, self).close()

    def log(self):
        self.em.log()
        if self.backup_app is not None:
            self.backup_app.log()
        self.nm.log()
        self.tm.log()
        if self.pt is not None:
            self.pt.log()

    def run(self):
        """Entry point: run _run, dumping full state on any failure."""
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise
        
    def _run(self):
        """Make sure that the status is sane and start a loop."""
        # Make a listening port.
        self.listening_conn = ListeningConnection(self.em, None, self.server)
        # Start a normal operation.
        while self.cluster_state != ClusterStates.STOPPING:
            # (Re)elect a new primary master.
            self.primary = not self.nm.getMasterList()
            if not self.primary:
                self.electPrimary()
            try:
                if self.primary:
                    self.playPrimaryRole()
                else:
                    self.playSecondaryRole()
                raise RuntimeError, 'should not reach here'
            except (ElectionFailure, PrimaryFailure):
                # Forget all connections.
                for conn in self.em.getClientList():
                    conn.close()

    def electPrimary(self):
        """Elect a primary master node.

        The difficulty is that a master node must accept connections from
        others while attempting to connect to other master nodes at the
        same time. Note that storage nodes and client nodes may connect
        to self as well as master nodes."""
        logging.info('begin the election of a primary master')
        client_handler = election.ClientElectionHandler(self)
        self.unconnected_master_node_set.clear()
        self.negotiating_master_node_set.clear()
        self.master_address_dict.clear()
        self.listening_conn.setHandler(election.ServerElectionHandler(self))
        getByAddress = self.nm.getByAddress
        while True:
            # handle new connected masters
            for node in self.nm.getMasterList():
                node.setUnknown()
                self.unconnected_master_node_set.add(node.getAddress())
            # start the election process
            self.primary = None
            self.primary_master_node = None
            try:
                while (self.unconnected_master_node_set or
                        self.negotiating_master_node_set):
                    for addr in self.unconnected_master_node_set:
                        self.negotiating_master_node_set.add(addr)
                        ClientConnection(self.em, client_handler,
                            # XXX: Ugly, but the whole election code will be
                            #      replaced soon
                            getByAddress(addr))
                    self.unconnected_master_node_set.clear()
                    self.em.poll(1)
            except ElectionFailure, m:
                # something goes wrong, clean then restart
                logging.error('election failed: %s', m)
                # Ask all connected nodes to reelect a single primary master.
                for conn in self.em.getClientList():
                    conn.notify(Packets.ReelectPrimary())
                    conn.abort()
                # Wait until the connections are closed.
                self.primary = None
                self.primary_master_node = None
                t = time() + 10
                while self.em.getClientList() and time() < t:
                    try:
                        self.em.poll(1)
                    except ElectionFailure:
                        pass
                # Close all connections.
                for conn in self.em.getClientList() + self.em.getServerList():
                    conn.close()
            else:
                # election succeed, stop the process
                self.primary = self.primary is None
                break

    def broadcastNodesInformation(self, node_list):
        """
        Broadcast changes for a set of nodes
        Send only one packet per connection to reduce bandwidth
        """
        node_dict = {}
        # group modified nodes by destination node type
        for node in node_list:
            node_info = node.asTuple()
            def assign_for_notification(node_type):
                # helper function
                node_dict.setdefault(node_type, []).append(node_info)
            if node.isMaster() or node.isStorage():
                # client get notifications for master and storage only
                assign_for_notification(NodeTypes.CLIENT)
            if node.isMaster() or node.isStorage() or node.isClient():
                assign_for_notification(NodeTypes.STORAGE)
                assign_for_notification(NodeTypes.ADMIN)
        # send at most one non-empty notification packet per node
        for node in self.nm.getIdentifiedList():
            node_list = node_dict.get(node.getType())
            if node_list and node.isRunning():
                node.notify(Packets.NotifyNodeInformation(node_list))

    def broadcastPartitionChanges(self, cell_list):
        """Broadcast a Notify Partition Changes packet."""
        logging.debug('broadcastPartitionChanges')
        if cell_list:
            self.pt.log()
            ptid = self.pt.setNextID()
            packet = Packets.NotifyPartitionChanges(ptid, cell_list)
            for node in self.nm.getIdentifiedList():
                # TODO: notify masters
                if node.isRunning() and not node.isMaster():
                    node.notify(packet)

    def provideService(self):
        """
        This is the normal mode for a primary master node. Handle transactions
        and stop the service only if a catastrophe happens or the user commits
        a shutdown.
        """
        logging.info('provide service')
        poll = self.em.poll
        self.tm.reset()
        self.changeClusterState(ClusterStates.RUNNING)
        # Now everything is passive.
        try:
            while True:
                poll(1)
        except StateChangedException, e:
            # Only a switch to backup mode is handled here; anything else
            # propagates to playPrimaryRole.
            if e.args[0] != ClusterStates.STARTING_BACKUP:
                raise
            self.backup_tid = tid = self.getLastTransaction()
            self.pt.setBackupTidDict({node.getUUID(): tid
                for node in self.nm.getStorageList(only_identified=True)})

    def playPrimaryRole(self):
        logging.info('play the primary role with %r', self.listening_conn)
        self.master_address_dict.clear()
        em = self.em
        packet = Packets.AnnouncePrimary()
        for conn in em.getConnectionList():
            if conn.isListening():
                conn.setHandler(identification.IdentificationHandler(self))
            else:
                conn.notify(packet)
                # Primary master should rather establish connections to all
                # secondaries, rather than the other way around. This requires
                # a bit more work when a new master joins a cluster but makes
                # it easier to resolve UUID conflicts with minimal cluster
                # impact, and ensure primary master unicity (primary masters
                # become noisy, in that they actively try to maintain
                # connections to all other master nodes, so duplicate
                # primaries will eventually get in touch with each other and
                # resolve the situation with a duel).
                # TODO: only abort client connections, don't close server
                # connections as we want to have them in the end. Secondary
                # masters will reconnect nevertheless, but it's dirty.
                # Currently, it's not trivial to preserve connected nodes,
                # because of poor node status tracking during election.
                conn.abort()
        # If I know any storage node, make sure that they are not in the
        # running state, because they are not connected at this stage.
        for node in self.nm.getStorageList():
            if node.isRunning():
                node.setTemporarilyDown()
        if self.uuid is None:
            self.uuid = self.getNewUUID(None, self.server, NodeTypes.MASTER)
            logging.info('My UUID: ' + uuid_str(self.uuid))
        else:
            in_conflict = self.nm.getByUUID(self.uuid)
            if in_conflict is not None:
                logging.warning('UUID conflict at election exit with %r',
                    in_conflict)
                in_conflict.setUUID(None)
        # recover the cluster status at startup
        try:
            self.runManager(RecoveryManager)
            while True:
                self.runManager(VerificationManager)
                try:
                    if self.backup_tid:
                        if self.backup_app is None:
                            raise RuntimeError("No upstream cluster to backup"
                                " defined in configuration")
                        self.backup_app.provideService()
                        # Reset connection with storages (and go through a
                        # recovery phase) when leaving backup mode in order
                        # to get correct last oid/tid.
                        self.runManager(RecoveryManager)
                        continue
                    self.provideService()
                except OperationFailure:
                    logging.critical('No longer operational')
                for node in self.nm.getIdentifiedList():
                    if node.isStorage() or node.isClient():
                        node.notify(Packets.StopOperation())
                        if node.isClient():
                            node.getConnection().abort()
        except StateChangedException, e:
            assert e.args[0] == ClusterStates.STOPPING
            self.shutdown()

    def playSecondaryRole(self):
        """
        I play a secondary role, thus only wait for a primary master to fail.
        """
        logging.info('play the secondary role with %r', self.listening_conn)
        # Wait for an announcement. If this is too long, probably
        # the primary master is down.
        t = time() + 10
        while self.primary_master_node is None:
            self.em.poll(1)
            if t < time():
                # election timeout
                raise ElectionFailure("Election timeout")
        self.master_address_dict.clear()
        # Restart completely. Non-optimized
        # but lower level code needs to be stabilized first.
        for conn in self.em.getConnectionList():
            if not conn.isListening():
                conn.close()
        # Reconnect to primary master node.
        primary_handler = secondary.PrimaryHandler(self)
        ClientConnection(self.em, primary_handler, self.primary_master_node)
        # and another for the future incoming connections
        self.listening_conn.setHandler(
            identification.SecondaryIdentificationHandler(self))
        while True:
            self.em.poll(1)

    def runManager(self, manager_klass):
        # _current_manager is also read by changeClusterState to pick the
        # storage handler while a manager is running.
        self._current_manager = manager_klass(self)
        try:
            self._current_manager.run()
        finally:
            self._current_manager = None

    def changeClusterState(self, state):
        """
        Change the cluster state and apply right handler on each connections
        """
        if self.cluster_state == state:
            return
        # select the storage handler
        client_handler = self.client_service_handler
        if state in (ClusterStates.RUNNING, ClusterStates.STARTING_BACKUP,
                ClusterStates.BACKINGUP, ClusterStates.STOPPING_BACKUP):
            storage_handler = self.storage_service_handler
        elif self._current_manager is not None:
            storage_handler = self._current_manager.getHandler()
        elif state == ClusterStates.STOPPING:
            storage_handler = None
        else:
            raise RuntimeError('Unexpected cluster state')
        # change handlers
        notification_packet = Packets.NotifyClusterInformation(state)
        for node in self.nm.getIdentifiedList():
            conn = node.getConnection()
            conn.notify(notification_packet)
            if node.isClient():
                if state != ClusterStates.RUNNING:
                    conn.abort()
                    continue
                handler = client_handler
            elif node.isStorage() and storage_handler:
                handler = storage_handler
            else:
                continue # keep handler
            if type(handler) is not type(conn.getLastHandler()):
                conn.setHandler(handler)
                handler.connectionCompleted(conn)
        self.cluster_state = state

    def getNewUUID(self, uuid, address, node_type):
        """Return *uuid* if it is free (or owned by the same address),
        otherwise allocate a new one in the namespace of *node_type*."""
        getByUUID = self.nm.getByUUID
        if None != uuid != self.uuid:
            node = getByUUID(uuid)
            if node is None or node.getAddress() == address:
                return uuid
        hob = UUID_NAMESPACES[node_type]
        for uuid in xrange((hob << 24) + 1, hob + 0x10 << 24):
            if uuid != self.uuid and getByUUID(uuid) is None:
                return uuid
        raise RuntimeError

    def getClusterState(self):
        return self.cluster_state

    def shutdown(self):
        """Close all connections and exit"""
        self.changeClusterState(ClusterStates.STOPPING)
        self.listening_conn.close()
        for conn in self.em.getConnectionList():
            node = self.nm.getByUUID(conn.getUUID())
            if node is None or not node.isIdentified():
                conn.close()
        # No need to change handlers in order to reject RequestIdentification
        # & AskBeginTransaction packets because they won't be any:
        # the only remaining connected peers are identified non-clients
        # and we don't accept new connections anymore.
        try:
            # wait for all transaction to be finished
            while self.tm.hasPending():
                self.em.poll(1)
        except OperationFailure:
            logging.critical('No longer operational')
        logging.info("asking remaining nodes to shutdown")
        handler = EventHandler(self)
        for node in self.nm.getConnectedList():
            conn = node.getConnection()
            if node.isStorage():
                conn.setHandler(handler)
                conn.notify(Packets.NotifyNodeInformation(((
                    node.getType(), node.getAddress(), node.getUUID(),
                    NodeStates.TEMPORARILY_DOWN),)))
                conn.abort()
            elif conn.pending():
                conn.abort()
            else:
                conn.close()
        while self.em.connection_dict:
            self.em.poll(1)
        # then shutdown
        sys.exit()

    def identifyStorageNode(self, known):
        """Return (state, handler) for an incoming storage node."""
        if known:
            state = NodeStates.RUNNING
        else:
            # same as for verification
            state = NodeStates.PENDING
        return state, self.storage_service_handler

    def onTransactionCommitted(self, txn):
        # I have received all the lock answers now:
        # - send a Notify Transaction Finished to the initiated client node
        # - Invalidate Objects to the other client nodes
        ttid = txn.getTTID()
        tid = txn.getTID()
        transaction_node = txn.getNode()
        invalidate_objects = Packets.InvalidateObjects(tid, txn.getOIDList())
        transaction_finished = Packets.AnswerTransactionFinished(ttid, tid)
        for client_node in self.nm.getClientList(only_identified=True):
            c = client_node.getConnection()
            if client_node is transaction_node:
                c.answer(transaction_finished, msg_id=txn.getMessageId())
            else:
                c.notify(invalidate_objects)
        # Unlock Information to relevant storage nodes.
        notify_unlock = Packets.NotifyUnlockInformation(ttid)
        getByUUID = self.nm.getByUUID
        for storage_uuid in txn.getUUIDList():
            getByUUID(storage_uuid).getConnection().notify(notify_unlock)
        # Notify storage that have replications blocked by this transaction
        notify_finished = Packets.NotifyTransactionFinished(ttid, tid)
        for storage_uuid in txn.getNotificationUUIDList():
            node = getByUUID(storage_uuid)
            if node is not None and node.isConnected():
                node.getConnection().notify(notify_finished)
        # remove transaction from manager
        self.tm.remove(transaction_node.getUUID(), ttid)
        assert self.last_transaction < tid, (self.last_transaction, tid)
        self.setLastTransaction(tid)

    def getLastTransaction(self):
        return self.last_transaction

    def setLastTransaction(self, tid):
        self.last_transaction = tid

    def setStorageNotReady(self, uuid):
        self.storage_readiness.discard(uuid)

    def setStorageReady(self, uuid):
        self.storage_readiness.add(uuid)

    def isStorageReady(self, uuid):
        return uuid in self.storage_readiness
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/backup_app.py 0000664 0000000 0000000 00000037741 12601037530 0025643 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2012-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import random, weakref
from bisect import bisect
from collections import defaultdict
from neo.lib import logging
from neo.lib.bootstrap import BootstrapManager
from neo.lib.exception import PrimaryFailure
from neo.lib.handler import EventHandler
from neo.lib.node import NodeManager
from neo.lib.protocol import CellStates, ClusterStates, \
NodeStates, NodeTypes, Packets, uuid_str, INVALID_TID, ZERO_TID
from neo.lib.util import add64, dump
from .app import StateChangedException
from .pt import PartitionTable
from .handlers.backup import BackupHandler
"""
Backup algorithm
This implementation relies on normal storage replication.
Storage nodes that are specialised for backup are not in the same NEO cluster,
but are managed by another master in a different cluster.
When the cluster is in BACKINGUP state, its master acts like a client to the
master of the main cluster. It gets notified of new data thanks to invalidation,
and notifies in turn its storage nodes what/when to replicate.
Storages stay in UP_TO_DATE state, even if partitions are synchronized up to
different tids. Storage nodes remember they are in such state and when
switching into RUNNING state, the cluster cuts the DB at the "backup TID", which
is the last TID for which we have all data. This TID can't be guessed from
'trans' and 'obj' tables, like it is done in normal mode, so:
- The master must even notify storages of transactions that don't modify their
partitions: see Replicate packets without any source.
- 'backup_tid' properties exist in many places, on the master and the storages,
so that the DB can be made consistent again at any moment, without losing
any (or little) data.
Out of backup storage nodes assigned to a partition, one is chosen as primary
for that partition. It means only this node will fetch data from the upstream
cluster, to minimize bandwidth between clusters. Other replicas will
synchronize from the primary node.
There is no UUID conflict between the 2 clusters:
- Storage nodes connect anonymously to upstream.
- Master node receives a new UUID from upstream master and uses it only when
communicating with it.
"""
class BackupApplication(object):
pt = None
def __init__(self, app, name, master_addresses):
self.app = weakref.proxy(app)
self.name = name
self.nm = NodeManager()
for master_address in master_addresses:
self.nm.createMaster(address=master_address)
em = property(lambda self: self.app.em)
    def close(self):
        """Release the upstream node manager and drop all instance state.

        Deleting __dict__ makes any later use of this object fail fast.
        """
        self.nm.close()
        del self.__dict__
def log(self):
self.nm.log()
if self.pt is not None:
self.pt.log()
    def provideService(self):
        """Main loop of backup mode.

        Connect to the upstream primary master as a client, mirror its
        partition table/last TID, and keep polling until a
        StateChangedException(STOPPING_BACKUP) asks us to leave backup
        mode, at which point the DB is truncated at the backup TID.
        """
        logging.info('provide backup')
        poll = self.em.poll
        app = self.app
        pt = app.pt
        while True:
            app.changeClusterState(ClusterStates.STARTING_BACKUP)
            bootstrap = BootstrapManager(self, self.name, NodeTypes.CLIENT)
            # {offset -> node}
            self.primary_partition_dict = {}
            # [[tid]]
            self.tid_list = tuple([] for _ in xrange(pt.getPartitions()))
            try:
                # Wait for every readable storage node to be ready before
                # contacting the upstream cluster.
                while True:
                    for node in pt.getNodeSet(readable=True):
                        if not app.isStorageReady(node.getUUID()):
                            break
                    else:
                        break
                    poll(1)
                node, conn, uuid, num_partitions, num_replicas = \
                    bootstrap.getPrimaryConnection()
                try:
                    app.changeClusterState(ClusterStates.BACKINGUP)
                    del bootstrap, node
                    if num_partitions != pt.getPartitions():
                        raise RuntimeError("inconsistent number of partitions")
                    self.pt = PartitionTable(num_partitions, num_replicas)
                    conn.setHandler(BackupHandler(self))
                    conn.ask(Packets.AskNodeInformation())
                    conn.ask(Packets.AskPartitionTable())
                    conn.ask(Packets.AskLastTransaction())
                    # debug variable to log how big 'tid_list' can be.
                    self.debug_tid_count = 0
                    while True:
                        poll(1)
                except PrimaryFailure, msg:
                    logging.error('upstream master is down: %s', msg)
                finally:
                    app.backup_tid = pt.getBackupTid()
                    try:
                        conn.close()
                    except PrimaryFailure:
                        pass
                    try:
                        del self.pt
                    except AttributeError:
                        pass
            except StateChangedException, e:
                if e.args[0] != ClusterStates.STOPPING_BACKUP:
                    raise
                app.changeClusterState(*e.args)
                tid = app.backup_tid
                # Wait for non-primary partitions to catch up,
                # so that all UP_TO_DATE cells are really UP_TO_DATE.
                # XXX: Another possibility could be to outdate such cells, and
                #      they would be quickly updated at the beginning of the
                #      RUNNING phase. This may simplify code.
                # Any unfinished replication from upstream will be truncated.
                while pt.getBackupTid(min) < tid:
                    poll(1)
                last_tid = app.getLastTransaction()
                handler = EventHandler(app)
                if tid < last_tid:
                    assert tid != ZERO_TID
                    logging.warning("Truncating at %s (last_tid was %s)",
                        dump(app.backup_tid), dump(last_tid))
                # XXX: We want to go through a recovery phase in order to
                #      initialize the transaction manager, but this is only
                #      possible if storages already know that we left backup
                #      mode. To that purpose, we always send a Truncate packet,
                #      even if there's nothing to truncate.
                p = Packets.Truncate(tid)
                for node in app.nm.getStorageList(only_identified=True):
                    conn = node.getConnection()
                    conn.setHandler(handler)
                    node.setState(NodeStates.TEMPORARILY_DOWN)
                    # Packets will be sent at the beginning of the recovery
                    # phase.
                    conn.notify(p)
                    conn.abort()
                # If any error happened before reaching this line, we'd go back
                # to backup mode, which is the right mode to recover.
                del app.backup_tid
                break
            finally:
                del self.primary_partition_dict, self.tid_list
                pt.clearReplicating()
def nodeLost(self, node):
getCellList = self.app.pt.getCellList
trigger_set = set()
for offset, primary_node in self.primary_partition_dict.items():
if primary_node is not node:
continue
cell_list = getCellList(offset, readable=True)
cell = max(cell_list, key=lambda cell: cell.backup_tid)
tid = cell.backup_tid
self.primary_partition_dict[offset] = primary_node = cell.getNode()
p = Packets.Replicate(tid, '', {offset: primary_node.getAddress()})
for cell in cell_list:
cell.replicating = tid
if cell.backup_tid < tid:
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(cell.getUUID()), offset, dump(tid),
uuid_str(primary_node.getUUID()))
cell.getNode().getConnection().notify(p)
trigger_set.add(primary_node)
for node in trigger_set:
self.triggerBackup(node)
    def invalidatePartitions(self, tid, partition_set):
        """Record that upstream transaction 'tid' touched 'partition_set'.

        Updates the per-partition tid history and the 'backup_tid' /
        'replicating' markers of readable cells, then wakes up the storage
        nodes that have something new to replicate. Untouched partitions
        whose cells are already up to date jump directly to 'tid' without
        replicating anything.
        """
        app = self.app
        prev_tid = app.getLastTransaction()
        app.setLastTransaction(tid)
        pt = app.pt
        trigger_set = set()
        # Maps a node to {offset: None} for the partitions whose
        # 'backup_tid' can be bumped without replicating.
        untouched_dict = defaultdict(dict)
        for offset in xrange(pt.getPartitions()):
            try:
                last_max_tid = self.tid_list[offset][-1]
            except IndexError:
                # No tid tracked yet for this partition.
                last_max_tid = prev_tid
            if offset in partition_set:
                self.tid_list[offset].append(tid)
                node_list = []
                for cell in pt.getCellList(offset, readable=True):
                    node = cell.getNode()
                    assert node.isConnected(), node
                    if cell.backup_tid == prev_tid:
                        # Let's given 4 TID t0,t1,t2,t3: if a cell is only
                        # modified by t0 & t3 and has all data for t0, 4 values
                        # are possible for its 'backup_tid' until it replicates
                        # up to t3: t0, t1, t2 or t3 - 1
                        # Choosing the smallest one (t0) is easier to implement
                        # but when leaving backup mode, we would always lose
                        # data if the last full transaction does not modify
                        # all partitions. t1 is wrong for the same reason.
                        # So we have chosen the highest one (t3 - 1).
                        # t2 should also work but maybe harder to implement.
                        cell.backup_tid = add64(tid, -1)
                        logging.debug(
                            "partition %u: updating backup_tid of %r to %s",
                            offset, cell, dump(cell.backup_tid))
                    else:
                        assert cell.backup_tid < last_max_tid, (
                            cell.backup_tid, last_max_tid, prev_tid, tid)
                    if app.isStorageReady(node.getUUID()):
                        node_list.append(node)
                assert node_list
                trigger_set.update(node_list)
                # Make sure we have a primary storage for this partition.
                if offset not in self.primary_partition_dict:
                    self.primary_partition_dict[offset] = \
                        random.choice(node_list)
            else:
                # Partition not touched, so increase 'backup_tid' of all
                # "up-to-date" replicas, without having to replicate.
                for cell in pt.getCellList(offset, readable=True):
                    if last_max_tid <= cell.backup_tid:
                        cell.backup_tid = tid
                        untouched_dict[cell.getNode()][offset] = None
                    elif last_max_tid <= cell.replicating:
                        # Same for 'replicating' to avoid useless orders.
                        logging.debug("silently update replicating order"
                            " of %s for partition %u, up to %s",
                            uuid_str(cell.getUUID()), offset, dump(tid))
                        cell.replicating = tid
        # NOTE: the loop variable deliberately rebinds 'untouched_dict' to
        # each per-node sub-dict; the outer dict is not used afterwards.
        for node, untouched_dict in untouched_dict.iteritems():
            if app.isStorageReady(node.getUUID()):
                node.notify(Packets.Replicate(tid, '', untouched_dict))
        for node in trigger_set:
            self.triggerBackup(node)
        # Debug aid: only log when the high-water mark of tracked tids grows.
        count = sum(map(len, self.tid_list))
        if self.debug_tid_count < count:
            logging.debug("Maximum number of tracked tids: %u", count)
            self.debug_tid_count = count
    def triggerBackup(self, node):
        """Ask backup storage 'node' to replicate, from the upstream
        cluster, every partition for which it is primary and lagging.

        Sources are picked among readable upstream cells; the cell list is
        shuffled and a best effort is made not to reuse the same upstream
        address as source for several partitions in a single call.
        """
        tid_list = self.tid_list
        tid = self.app.getLastTransaction()
        replicate_list = []
        for offset, cell in self.app.pt.iterNodeCell(node):
            max_tid = tid_list[offset]
            # Replicate only if 'node' is primary for this partition and
            # the cell lags behind the last tracked tid.
            if max_tid and self.primary_partition_dict[offset] is node and \
               max(cell.backup_tid, cell.replicating) < max_tid[-1]:
                cell.replicating = tid
                replicate_list.append(offset)
        if not replicate_list:
            return
        # self.pt is the partition table of the *upstream* cluster.
        getCellList = self.pt.getCellList
        source_dict = {}
        address_set = set()
        for offset in replicate_list:
            cell_list = getCellList(offset, readable=True)
            random.shuffle(cell_list)
            assert cell_list, offset
            for cell in cell_list:
                addr = cell.getAddress()
                if addr in address_set:
                    # Address already used as source for another partition:
                    # stop searching and reuse it anyway (best effort only).
                    break
            else:
                # No break: this address was unused so far, reserve it.
                address_set.add(addr)
            source_dict[offset] = addr
            logging.debug("ask %s to replicate partition %u up to %s from %r",
                uuid_str(node.getUUID()), offset, dump(tid), addr)
        node.getConnection().notify(Packets.Replicate(
            tid, self.name, source_dict))
    def notifyReplicationDone(self, node, offset, tid):
        """A backup storage reports having replicated partition 'offset'
        up to 'tid'; update the cell and possibly schedule more work.

        Returns the result of setUpToDate() for secondary cells (truthy
        when the cell was out-of-date), None when 'node' is the primary.
        """
        app = self.app
        cell = app.pt.getCell(offset, node.getUUID())
        tid_list = self.tid_list[offset]
        if tid_list: # may be empty if the cell is out-of-date
                     # or if we're not fully initialized
            if tid < tid_list[0]:
                cell.replicating = tid
            else:
                try:
                    # Round 'tid' down to just below the next tracked tid.
                    tid = add64(tid_list[bisect(tid_list, tid)], -1)
                except IndexError:
                    last_tid = app.getLastTransaction()
                    if tid < last_tid:
                        tid = last_tid
                        node.notify(Packets.Replicate(tid, '', {offset: None}))
        logging.debug("partition %u: updating backup_tid of %r to %s",
                      offset, cell, dump(tid))
        cell.backup_tid = tid
        # Forget tids we won't need anymore.
        cell_list = app.pt.getCellList(offset, readable=True)
        del tid_list[:bisect(tid_list, min(x.backup_tid for x in cell_list))]
        primary_node = self.primary_partition_dict.get(offset)
        primary = primary_node is node
        result = None if primary else app.pt.setUpToDate(node, offset)
        assert cell.isReadable()
        if result: # was out-of-date
            if primary_node is not None:
                # The cell must still catch up with the primary one.
                max_tid, = [x.backup_tid for x in cell_list
                            if x.getNode() is primary_node]
                if tid < max_tid:
                    cell.replicating = max_tid
                    logging.debug(
                        "ask %s to replicate partition %u up to %s from %s",
                        uuid_str(node.getUUID()), offset, dump(max_tid),
                        uuid_str(primary_node.getUUID()))
                    node.notify(Packets.Replicate(max_tid, '',
                        {offset: primary_node.getAddress()}))
        else:
            if app.getClusterState() == ClusterStates.BACKINGUP:
                self.triggerBackup(node)
            if primary:
                # Notify secondary storages that they can replicate from
                # primary ones, even if they are already replicating.
                p = Packets.Replicate(tid, '', {offset: node.getAddress()})
                for cell in cell_list:
                    if max(cell.backup_tid, cell.replicating) < tid:
                        cell.replicating = tid
                        logging.debug(
                            "ask %s to replicate partition %u up to %s from %s",
                            uuid_str(cell.getUUID()), offset,
                            dump(tid), uuid_str(node.getUUID()))
                        cell.getNode().notify(p)
        return result
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/ 0000775 0000000 0000000 00000000000 12601037530 0024750 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/__init__.py 0000664 0000000 0000000 00000011473 12601037530 0027067 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.handler import EventHandler
from neo.lib.protocol import (uuid_str, NodeTypes, NodeStates, Packets,
BrokenNodeDisallowedError,
)
class MasterHandler(EventHandler):
    """Generic part of the master event handlers.

    Subclasses provide _setupNode() to accept or reject a peer; the
    common identification and ask/answer plumbing lives here.
    """

    def requestIdentification(self, conn, node_type, uuid, address, name):
        """Check the peer, register it through _setupNode() and answer
        with an AcceptIdentification packet describing this cluster."""
        self.checkClusterName(name)
        app = self.app
        node = app.nm.getByUUID(uuid)
        if node:
            # A known master may only show up with its known address (or
            # no address); broken nodes are rejected outright.
            assert node_type is not NodeTypes.MASTER or node.getAddress() in (
                address, None), (node, address)
            if node.isBroken():
                raise BrokenNodeDisallowedError
        else:
            node = app.nm.getByAddress(address)
        peer_uuid = self._setupNode(conn, node_type, uuid, address, node)

        # Tell the peer who the primary master is, as far as we know.
        primary_address = None
        if app.primary:
            primary_address = app.server
        elif app.primary_master_node is not None:
            primary_address = app.primary_master_node.getAddress()

        # Ourselves first, then every non-broken known master.
        known_master_list = [(app.server, app.uuid)]
        known_master_list += [(n.getAddress(), n.getUUID())
            for n in app.nm.getMasterList() if not n.isBroken()]

        conn.answer(Packets.AcceptIdentification(
            NodeTypes.MASTER,
            app.uuid,
            app.pt.getPartitions(),
            app.pt.getReplicas(),
            peer_uuid,
            primary_address,
            known_master_list),
        )

    def askClusterState(self, conn):
        """Answer the current cluster state."""
        conn.answer(Packets.AnswerClusterState(self.app.getClusterState()))

    def askLastIDs(self, conn):
        """Answer the last OID, last TID, partition table id & backup tid."""
        app = self.app
        tm = app.tm
        conn.answer(Packets.AnswerLastIDs(
            tm.getLastOID(),
            tm.getLastTID(),
            app.pt.getID(),
            app.backup_tid))

    def askLastTransaction(self, conn):
        """Answer the id of the last committed transaction."""
        last_tid = self.app.getLastTransaction()
        conn.answer(Packets.AnswerLastTransaction(last_tid))

    def askNodeInformation(self, conn):
        """Push the full node table (masters, clients, storages) to the
        peer, then answer."""
        nm = self.app.nm
        node_list = []
        for getter in nm.getMasterList, nm.getClientList, nm.getStorageList:
            node_list.extend(n.asTuple() for n in getter())
        conn.notify(Packets.NotifyNodeInformation(node_list))
        conn.answer(Packets.AnswerNodeInformation())

    def askPartitionTable(self, conn):
        """Answer the whole partition table along with its id."""
        pt = self.app.pt
        conn.answer(Packets.AnswerPartitionTable(pt.getID(), pt.getRowList()))
# State assigned to a node of a given type when its connection to the
# primary master is lost; types absent from this mapping fall back to
# NodeStates.DOWN (see BaseServiceHandler.connectionLost).
DISCONNECTED_STATE_DICT = {
    NodeTypes.STORAGE: NodeStates.TEMPORARILY_DOWN,
}
class BaseServiceHandler(MasterHandler):
    """This class deals with events for a service phase."""

    def nodeLost(self, conn, node):
        # This method provides a hook point overridable by service classes.
        # It is triggered when a connection to a node gets lost.
        pass

    def connectionLost(self, conn, new_state):
        """Mark the node behind a lost connection as disconnected and
        broadcast the state change to the rest of the cluster.

        Pending (non-broken) nodes are dropped from the node manager so
        they do not come back in running state. Finally nodeLost() lets
        specialized handlers clean their own data.
        """
        node = self.app.nm.getByUUID(conn.getUUID())
        if node is None:
            return # for example, when a storage is removed by an admin
        if new_state != NodeStates.BROKEN:
            # Fall back to the per-type disconnected state (storages are
            # only TEMPORARILY_DOWN, other types go DOWN).
            new_state = DISCONNECTED_STATE_DICT.get(node.getType(),
                NodeStates.DOWN)
        assert new_state in (NodeStates.TEMPORARILY_DOWN, NodeStates.DOWN,
            NodeStates.BROKEN), new_state
        assert node.getState() not in (NodeStates.TEMPORARILY_DOWN,
            NodeStates.DOWN, NodeStates.BROKEN), (uuid_str(self.app.uuid),
            node.whoSetState(), new_state)
        was_pending = node.isPending()
        node.setState(new_state)
        if new_state != NodeStates.BROKEN and was_pending:
            # was in pending state, so drop it from the node manager to forget
            # it and do not set in running state when it comes back
            logging.info('drop a pending node from the node manager')
            self.app.nm.remove(node)
        self.app.broadcastNodesInformation([node])
        # clean node related data in specialized handlers
        self.nodeLost(conn, node)

    def notifyReady(self, conn):
        # The storage node behind this connection is ready to serve.
        self.app.setStorageReady(conn.getUUID())
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/administration.py 0000664 0000000 0000000 00000021353 12601037530 0030353 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import random
from . import MasterHandler
from ..app import StateChangedException
from neo.lib import logging
from neo.lib.pt import PartitionTableException
from neo.lib.protocol import ClusterStates, Errors, \
NodeStates, NodeTypes, Packets, ProtocolError, uuid_str
from neo.lib.util import dump
# Allowed cluster state transitions, as requested by the admin node:
# destination state -> tuple of acceptable source states.
# STOPPING is absent but always accepted (see setClusterState).
CLUSTER_STATE_WORKFLOW = {
    # destination: sources
    ClusterStates.VERIFYING: (ClusterStates.RECOVERING,),
    ClusterStates.STARTING_BACKUP: (ClusterStates.RUNNING,
                                    ClusterStates.STOPPING_BACKUP),
    ClusterStates.STOPPING_BACKUP: (ClusterStates.BACKINGUP,
                                    ClusterStates.STARTING_BACKUP),
}

# States an admin is allowed to force on a node, per node type
# (see setNodeState).
NODE_STATE_WORKFLOW = {
    NodeTypes.MASTER: (NodeStates.UNKNOWN,),
    NodeTypes.STORAGE: (NodeStates.UNKNOWN, NodeStates.DOWN),
}
class AdministrationHandler(MasterHandler):
    """This class deals with messages from the admin node only"""

    def connectionLost(self, conn, new_state):
        # Simply forget the admin node: it has no persistent role.
        node = self.app.nm.getByUUID(conn.getUUID())
        self.app.nm.remove(node)

    def setClusterState(self, conn, state):
        """Switch the cluster to 'state' if the transition is allowed.

        Raises ProtocolError for a forbidden or unknown transition, and
        StateChangedException so the main loop applies an actual change.
        """
        app = self.app
        # check request
        try:
            if app.cluster_state not in CLUSTER_STATE_WORKFLOW[state]:
                raise ProtocolError('Can not switch to this state')
        except KeyError:
            # STOPPING is the only state absent from the workflow table
            # that may still be requested, from any current state.
            if state != ClusterStates.STOPPING:
                raise ProtocolError('Invalid state requested')

        # change state
        if state == ClusterStates.VERIFYING:
            storage_list = app.nm.getStorageList(only_identified=True)
            if not storage_list:
                raise ProtocolError('Cannot exit recovery without any '
                    'storage node')
            for node in storage_list:
                assert node.isPending(), node
                if node.getConnection().isPending():
                    # A storage is still handshaking: refuse to leave
                    # recovery until it is fully in or out.
                    raise ProtocolError('Cannot exit recovery now: node %r is '
                        'entering cluster' % (node, ))
            app._startup_allowed = True
            state = app.cluster_state
        elif state == ClusterStates.STARTING_BACKUP:
            if app.tm.hasPending() or app.nm.getClientList(True):
                raise ProtocolError("Can not switch to %s state with pending"
                    " transactions or connected clients" % state)

        conn.answer(Errors.Ack('Cluster state changed'))
        if state != app.cluster_state:
            raise StateChangedException(state)

    def setNodeState(self, conn, uuid, state):
        """Force a node into 'state' (per NODE_STATE_WORKFLOW).

        For storages, the partition table is updated accordingly (cells
        dropped or outdated) and the change is broadcast.
        """
        logging.info("set node state for %s: %s", uuid_str(uuid), state)
        app = self.app
        node = app.nm.getByUUID(uuid)
        if node is None:
            raise ProtocolError('unknown node')
        if state not in NODE_STATE_WORKFLOW.get(node.getType(), ()):
            raise ProtocolError('can not switch node to this state')
        if uuid == app.uuid:
            raise ProtocolError('can not kill primary master node')

        state_changed = state != node.getState()
        message = ('state changed' if state_changed else
                   'node already in %s state' % state)
        if node.isStorage():
            # UNKNOWN keeps the node's cells (outdated), DOWN drops them.
            keep = state == NodeStates.UNKNOWN
            try:
                cell_list = app.pt.dropNodeList([node], keep)
            except PartitionTableException, e:
                raise ProtocolError(str(e))
            node.setState(state)
            if node.isConnected():
                # notify itself so it can shutdown
                node.notify(Packets.NotifyNodeInformation([node.asTuple()]))
                # close to avoid handle the closure as a connection lost
                node.getConnection().abort()
            if keep:
                cell_list = app.pt.outdate()
            elif cell_list:
                message = 'node permanently removed'
            app.broadcastPartitionChanges(cell_list)
        else:
            node.setState(state)

        # /!\ send the node information *after* the partition table change
        conn.answer(Errors.Ack(message))
        if state_changed:
            # notify node explicitly because broadcastNodesInformation()
            # ignores non-running nodes
            assert not node.isRunning()
            if node.isConnected():
                node.notify(Packets.NotifyNodeInformation([node.asTuple()]))
            app.broadcastNodesInformation([node])

    def addPendingNodes(self, conn, uuid_list):
        """Add the pending storage nodes in 'uuid_list' to the partition
        table and start operation on them."""
        uuids = ', '.join(map(uuid_str, uuid_list))
        logging.debug('Add nodes %s', uuids)
        app = self.app
        state = app.getClusterState()
        # XXX: Would it be safe to allow more states ?
        if state not in (ClusterStates.RUNNING,
                         ClusterStates.STARTING_BACKUP,
                         ClusterStates.BACKINGUP):
            raise ProtocolError('Can not add nodes in %s state' % state)
        # take all pending nodes
        node_list = list(app.pt.addNodeList(node
            for node in app.nm.getStorageList()
            if node.isPending() and node.getUUID() in uuid_list))
        if node_list:
            p = Packets.StartOperation(bool(app.backup_tid))
            for node in node_list:
                node.setRunning()
                node.notify(p)
            app.broadcastNodesInformation(node_list)
            conn.answer(Errors.Ack('Nodes added: %s' %
                ', '.join(uuid_str(x.getUUID()) for x in node_list)))
        else:
            logging.warning('No node added')
            conn.answer(Errors.Ack('No node added'))

    def tweakPartitionTable(self, conn, uuid_list):
        """Rebalance the partition table, optionally dropping the nodes
        whose uuids are given, and broadcast the resulting changes."""
        app = self.app
        state = app.getClusterState()
        # XXX: Would it be safe to allow more states ?
        if state not in (ClusterStates.RUNNING,
                         ClusterStates.STARTING_BACKUP,
                         ClusterStates.BACKINGUP):
            raise ProtocolError('Can not tweak partition table in %s state'
                                % state)
        app.broadcastPartitionChanges(app.pt.tweak(
            map(app.nm.getByUUID, uuid_list)))
        conn.answer(Errors.Ack(''))

    def checkReplicas(self, conn, partition_dict, min_tid, max_tid):
        """Ask one storage per partition to check its replicas against a
        reference source, between 'min_tid' and 'max_tid'."""
        app = self.app
        pt = app.pt
        backingup = bool(app.backup_tid)
        if not max_tid:
            max_tid = pt.getCheckTid(partition_dict) if backingup else \
                app.getLastTransaction()
        if min_tid > max_tid:
            logging.warning("nothing to check: min_tid=%s > max_tid=%s",
                            dump(min_tid), dump(max_tid))
        else:
            getByUUID = app.nm.getByUUID
            node_set = set()
            for offset, source in partition_dict.iteritems():
                # XXX: For the moment, code checking replicas is unable to fix
                #      corrupted partitions (when a good cell is known)
                #      so only check readable ones.
                #      (see also Checker._nextPartition of storage)
                cell_list = pt.getCellList(offset, True)
                #cell_list = [cell for cell in pt.getCellList(offset)
                #                  if not cell.isOutOfDate()]
                if len(cell_list) + (backingup and not source) <= 1:
                    continue
                for cell in cell_list:
                    node = cell.getNode()
                    if node in node_set:
                        break
                else:
                    node_set.add(node)
                if source:
                    source = '', getByUUID(source).getAddress()
                else:
                    readable = [cell for cell in cell_list if cell.isReadable()]
                    if 1 == len(readable) < len(cell_list):
                        source = '', readable[0].getAddress()
                    elif backingup:
                        source = app.backup_app.name, random.choice(
                            app.backup_app.pt.getCellList(offset, readable=True)
                            ).getAddress()
                    else:
                        source = '', None
                node.getConnection().notify(Packets.CheckPartition(
                    offset, source, min_tid, max_tid))
        conn.answer(Errors.Ack(''))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/backup.py 0000664 0000000 0000000 00000003710 12601037530 0026570 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2012-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib.exception import PrimaryFailure
from neo.lib.handler import EventHandler
from neo.lib.protocol import ZERO_TID
class BackupHandler(EventHandler):
    """Handler dedicated to upstream master during BACKINGUP state"""

    def connectionLost(self, conn, new_state):
        # Losing the upstream primary while we are still running is fatal.
        if self.app.app.listening_conn: # if running
            raise PrimaryFailure('connection lost')

    def answerPartitionTable(self, conn, ptid, row_list):
        # Load the upstream partition table into the backup application.
        backup_app = self.app
        backup_app.pt.load(ptid, row_list, backup_app.nm)

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        # Keep our copy of the upstream partition table in sync.
        backup_app = self.app
        backup_app.pt.update(ptid, cell_list, backup_app.nm)

    def answerNodeInformation(self, conn):
        pass

    def notifyNodeInformation(self, conn, node_list):
        self.app.nm.update(node_list)

    def answerLastTransaction(self, conn, tid):
        backup_app = self.app
        if tid == ZERO_TID:
            # Upstream DB is empty: nothing to invalidate.
            assert backup_app.app.getLastTransaction() == tid
        else:
            backup_app.invalidatePartitions(
                tid, set(xrange(backup_app.pt.getPartitions())))

    def invalidateObjects(self, conn, tid, oid_list):
        # Invalidate the partitions of every modified oid, plus the one
        # holding the transaction metadata itself.
        backup_app = self.app
        getPartition = backup_app.app.pt.getPartition
        partition_set = {getPartition(oid) for oid in oid_list}
        partition_set.add(getPartition(tid))
        backup_app.invalidatePartitions(tid, partition_set)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/client.py 0000664 0000000 0000000 00000007612 12601037530 0026606 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib.protocol import NodeStates, Packets, ProtocolError
from . import MasterHandler
class ClientServiceHandler(MasterHandler):
    """ Handler dedicated to client during service state """

    def connectionCompleted(self, conn):
        pass

    def connectionLost(self, conn, new_state):
        # cancel its transactions and forgot the node
        app = self.app
        if app.listening_conn: # if running
            node = app.nm.getByUUID(conn.getUUID())
            assert node is not None
            app.tm.abortFor(node)
            node.setState(NodeStates.DOWN)
            app.broadcastNodesInformation([node])
            app.nm.remove(node)

    def askNodeInformation(self, conn):
        # send informations about master and storages only
        nm = self.app.nm
        node_list = []
        node_list.extend(n.asTuple() for n in nm.getMasterList())
        node_list.extend(n.asTuple() for n in nm.getStorageList())
        conn.notify(Packets.NotifyNodeInformation(node_list))
        conn.answer(Packets.AnswerNodeInformation())

    def askBeginTransaction(self, conn, tid):
        """
        A client request a TID, nothing is kept about it until the finish.
        """
        app = self.app
        node = app.nm.getByUUID(conn.getUUID())
        conn.answer(Packets.AnswerBeginTransaction(app.tm.begin(node, tid)))

    def askNewOIDs(self, conn, num_oids):
        # Allocate and answer the next 'num_oids' OIDs.
        conn.answer(Packets.AnswerNewOIDs(self.app.tm.getNextOIDList(num_oids)))

    def askFinishTransaction(self, conn, ttid, oid_list):
        """Start commit of transaction 'ttid': ask every involved and
        ready storage node to lock the transaction data.

        The client is answered later, once the locks are acquired.
        Raises ProtocolError when no involved storage is ready.
        """
        app = self.app
        pt = app.pt

        # Collect partitions related to this transaction.
        partition_set = set(map(pt.getPartition, oid_list))
        partition_set.add(pt.getPartition(ttid))

        # Collect the UUIDs of nodes related to this transaction.
        uuid_list = filter(app.isStorageReady, {cell.getUUID()
            for part in partition_set
            for cell in pt.getCellList(part)
            if cell.getNodeState() != NodeStates.HIDDEN})
        if not uuid_list:
            raise ProtocolError('No storage node ready for transaction')

        identified_node_list = app.nm.getIdentifiedList(pool_set=set(uuid_list))

        # Request locking data.
        # build a new set as we may not send the message to all nodes as some
        # might be not reachable at that time
        p = Packets.AskLockInformation(
            ttid,
            app.tm.prepare(
                ttid,
                pt.getPartitions(),
                oid_list,
                {x.getUUID() for x in identified_node_list},
                conn.getPeerId(),
            ),
            oid_list,
        )
        for node in identified_node_list:
            node.ask(p, timeout=60)

    def askPack(self, conn, tid):
        """Start a pack up to 'tid' on every identified storage, unless a
        pack is already in progress (then answer False immediately)."""
        app = self.app
        if app.packing is None:
            storage_list = app.nm.getStorageList(only_identified=True)
            app.packing = (conn, conn.getPeerId(),
                {x.getUUID() for x in storage_list})
            p = Packets.AskPack(tid)
            for storage in storage_list:
                storage.getConnection().ask(p)
        else:
            conn.answer(Packets.AnswerPack(False))

    def abortTransaction(self, conn, tid):
        # Drop the transaction from the transaction manager.
        self.app.tm.remove(conn.getUUID(), tid)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/election.py 0000664 0000000 0000000 00000011773 12601037530 0027135 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.protocol import uuid_str, NodeTypes, Packets
from neo.lib.protocol import NotReadyError
from neo.lib.exception import ElectionFailure
from neo.lib.handler import EventHandler
from . import MasterHandler
class BaseElectionHandler(EventHandler):
    """Election logic shared by the client and server election handlers."""

    def reelectPrimary(self, conn):
        raise ElectionFailure('reelection requested')

    def announcePrimary(self, conn):
        """A peer announces itself as primary: record it and leave the
        election phase."""
        app = self.app
        if app.primary:
            # I am also the primary... So restart the election.
            raise ElectionFailure('another primary arises')
        try:
            # Server connections are keyed in master_address_dict.
            address = app.master_address_dict[conn]
            assert conn.isServer()
        except KeyError:
            # Otherwise the peer address comes from the connection itself.
            address = conn.getAddress()
            assert conn.isClient()
        app.primary = False
        node = app.nm.getByAddress(address)
        app.primary_master_node = node
        app.negotiating_master_node_set.clear()
        logging.info('%s is the primary', node)

    def elect(self, conn, peer_address):
        """Compare our address with the peer's: the greater address wins
        primaryship. In any case the peer is done negotiating."""
        app = self.app
        if app.server < peer_address:
            app.primary = False
            if conn is not None:
                app.master_address_dict[conn] = peer_address
        app.negotiating_master_node_set.discard(peer_address)
class ClientElectionHandler(BaseElectionHandler):
    """Election handler for outgoing (client-side) master connections."""

    def connectionFailed(self, conn):
        # Stop negotiating with a peer we could not reach.
        addr = conn.getAddress()
        node = self.app.nm.getByAddress(addr)
        assert node is not None, (uuid_str(self.app.uuid), addr)
        # node may still be in unknown state
        self.app.negotiating_master_node_set.discard(addr)
        super(ClientElectionHandler, self).connectionFailed(conn)

    def connectionCompleted(self, conn):
        # Identify ourselves to the peer master as soon as connected.
        app = self.app
        conn.ask(Packets.RequestIdentification(
            NodeTypes.MASTER,
            app.uuid,
            app.server,
            app.name,
        ))
        super(ClientElectionHandler, self).connectionCompleted(conn)

    def connectionLost(self, conn, new_state):
        # Retry connection. Either the node just died (and we will end up in
        # connectionFailed) or it just got elected (and we must not ignore
        # that node).
        addr = conn.getAddress()
        self.app.unconnected_master_node_set.add(addr)
        self.app.negotiating_master_node_set.discard(addr)

    def _acceptIdentification(self, node, peer_uuid, num_partitions,
            num_replicas, your_uuid, primary, known_master_list):
        """Handle the identification answer of a peer master: learn about
        other masters and possibly about the primary."""
        app = self.app
        # Register new master nodes.
        for address, uuid in known_master_list:
            if app.server == address:
                # This is self.
                assert node.getAddress() != primary or uuid == your_uuid, (
                    uuid_str(uuid), uuid_str(your_uuid))
                continue
            n = app.nm.getByAddress(address)
            if n is None:
                n = app.nm.createMaster(address=address)

        if primary is not None:
            # The primary master is defined.
            if app.primary_master_node is not None \
                    and app.primary_master_node.getAddress() != primary:
                # There are multiple primary master nodes. This is
                # dangerous.
                raise ElectionFailure, 'multiple primary master nodes'
            primary_node = app.nm.getByAddress(primary)
            if primary_node is None:
                # I don't know such a node. Probably this information
                # is old. So ignore it.
                logging.warning('received an unknown primary node')
            else:
                # Whatever the situation is, I trust this master.
                app.primary = False
                app.primary_master_node = primary_node
                # Stop waiting for connections than primary master's to
                # complete to exit election phase ASAP.
                app.negotiating_master_node_set.clear()
                return

        self.elect(None, node.getAddress())
class ServerElectionHandler(BaseElectionHandler, MasterHandler):
    """Election handler for incoming (server-side) connections."""

    def _setupNode(self, conn, node_type, uuid, address, node):
        """Register an incoming master peer and run the election against
        its address; non-master peers are rejected."""
        if node_type != NodeTypes.MASTER:
            logging.info('reject a connection from a non-master')
            raise NotReadyError
        if node is None:
            node = self.app.nm.createMaster(address=address)
        self.elect(conn, address)
        return uuid
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/identification.py 0000664 0000000 0000000 00000007350 12601037530 0030320 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.protocol import ClusterStates, NodeStates, NodeTypes, \
NotReadyError, ProtocolError, uuid_str
from . import MasterHandler
class IdentificationHandler(MasterHandler):
    """Handles the very first packet of a peer: its identification."""

    def requestIdentification(self, conn, *args, **kw):
        """Identify the peer, then let the service handler installed by
        _setupNode() process the now-complete connection."""
        super(IdentificationHandler, self).requestIdentification(conn, *args,
            **kw)
        handler = conn.getHandler()
        assert not isinstance(handler, IdentificationHandler), handler
        handler.connectionCompleted(conn)

    def _setupNode(self, conn, node_type, uuid, address, node):
        """Register or revive the node behind 'conn' according to its
        type, install the matching service handler, and return the UUID
        assigned to the peer.

        Raises NotReadyError when the cluster cannot accept this node
        type yet, and ProtocolError for a duplicate running node.
        """
        app = self.app
        if node:
            if node.isRunning():
                # cloned/evil/buggy node connecting to us
                raise ProtocolError('already connected')
            else:
                assert not node.isConnected()
                node.setAddress(address)
                node.setRunning()

        state = NodeStates.RUNNING
        if node_type == NodeTypes.CLIENT:
            if app.cluster_state != ClusterStates.RUNNING:
                raise NotReadyError
            node_ctor = app.nm.createClient
            handler = app.client_service_handler
            human_readable_node_type = ' client '
        elif node_type == NodeTypes.STORAGE:
            if app.cluster_state == ClusterStates.STOPPING_BACKUP:
                raise NotReadyError
            node_ctor = app.nm.createStorage
            manager = app._current_manager
            if manager is None:
                manager = app
            state, handler = manager.identifyStorageNode(
                uuid is not None and node is not None)
            human_readable_node_type = ' storage (%s) ' % (state, )
        elif node_type == NodeTypes.MASTER:
            node_ctor = app.nm.createMaster
            handler = app.secondary_master_handler
            human_readable_node_type = ' master '
        elif node_type == NodeTypes.ADMIN:
            node_ctor = app.nm.createAdmin
            handler = app.administration_handler
            # leading 'n' completes "Accept a" into "Accept an admin"
            human_readable_node_type = 'n admin '
        else:
            raise NotImplementedError(node_type)

        uuid = app.getNewUUID(uuid, address, node_type)
        logging.info('Accept a' + human_readable_node_type + uuid_str(uuid))
        if node is None:
            node = node_ctor(uuid=uuid, address=address)
        node.setUUID(uuid)
        node.setState(state)
        node.setConnection(conn)
        conn.setHandler(handler)
        app.broadcastNodesInformation([node])
        return uuid
class SecondaryIdentificationHandler(MasterHandler):
    """Identification handler used while being a secondary master."""

    def announcePrimary(self, conn):
        """Deliberately ignored.

        If we received AnnouncePrimary on a client connection, we might
        have set this handler on a server connection too, and receive
        AnnouncePrimary there as well. As we cannot reach this point
        without already having handled a first AnnouncePrimary, it is
        safe to drop this one.
        """

    def _setupNode(self, conn, node_type, uuid, address, node):
        # Nothing to do: the storage will disconnect when it receives our
        # answer, and the primary master performs the real checks.
        return uuid
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/secondary.py 0000664 0000000 0000000 00000007172 12601037530 0027320 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import sys
from . import MasterHandler
from neo.lib.handler import EventHandler
from neo.lib.exception import ElectionFailure, PrimaryFailure
from neo.lib.protocol import NodeStates, NodeTypes, Packets, uuid_str
from neo.lib import logging
class SecondaryMasterHandler(MasterHandler):
    """ Handler used by primary to handle secondary masters"""

    def connectionLost(self, conn, new_state):
        """Mark the disconnected secondary master as down and broadcast
        the change."""
        app = self.app
        lost_node = app.nm.getByUUID(conn.getUUID())
        assert lost_node is not None
        lost_node.setDown()
        app.broadcastNodesInformation([lost_node])

    def announcePrimary(self, conn):
        # A secondary must never claim primaryship while we are primary.
        raise ElectionFailure('another primary arises')

    def reelectPrimary(self, conn):
        raise ElectionFailure('reelection requested')
class PrimaryHandler(EventHandler):
    """ Handler used by secondaries to handle primary master"""

    def connectionLost(self, conn, new_state):
        self.app.primary_master_node.setDown()
        raise PrimaryFailure, 'primary master is dead'

    def connectionFailed(self, conn):
        self.app.primary_master_node.setDown()
        raise PrimaryFailure, 'primary master is dead'

    def connectionCompleted(self, conn):
        # Identify ourselves to the primary as soon as connected.
        app = self.app
        addr = conn.getAddress()
        node = app.nm.getByAddress(addr)
        # connection successfull, set it as running
        node.setRunning()
        conn.ask(Packets.RequestIdentification(
            NodeTypes.MASTER,
            app.uuid,
            app.server,
            app.name,
        ))
        super(PrimaryHandler, self).connectionCompleted(conn)

    def reelectPrimary(self, conn):
        raise ElectionFailure, 'reelection requested'

    def notifyClusterInformation(self, conn, state):
        self.app.cluster_state = state

    def notifyNodeInformation(self, conn, node_list):
        """Track master nodes announced by the primary; exit if we are
        reported as UNKNOWN."""
        app = self.app
        for node_type, addr, uuid, state in node_list:
            if node_type != NodeTypes.MASTER:
                # No interest.
                continue
            if uuid == app.uuid and state == NodeStates.UNKNOWN:
                sys.exit()
            # Register new master nodes.
            if app.server == addr:
                # This is self.
                continue
            else:
                n = app.nm.getByAddress(addr)
                # master node must be known
                assert n is not None
                if uuid is not None:
                    # If I don't know the UUID yet, believe what the peer
                    # told me at the moment.
                    if n.getUUID() is None:
                        n.setUUID(uuid)

    def _acceptIdentification(self, node, uuid, num_partitions,
            num_replicas, your_uuid, primary, known_master_list):
        """Adopt the uuid the primary assigned us and record its own."""
        app = self.app
        # NOTE(review): the check compares addresses although the error
        # message says "uuid" — confirm the intended wording.
        if primary != app.primary_master_node.getAddress():
            raise PrimaryFailure('unexpected primary uuid')
        if your_uuid != app.uuid:
            app.uuid = your_uuid
            logging.info('My UUID: ' + uuid_str(your_uuid))
        node.setUUID(uuid)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/handlers/storage.py 0000664 0000000 0000000 00000010072 12601037530 0026766 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from neo.lib import logging
from neo.lib.protocol import CellStates, ClusterStates, Packets, ProtocolError
from neo.lib.exception import OperationFailure
from neo.lib.pt import PartitionTableException
from . import BaseServiceHandler
class StorageServiceHandler(BaseServiceHandler):
    """ Handler dedicated to storages during service state """

    def connectionCompleted(self, conn):
        # TODO: unit test
        app = self.app
        uuid = conn.getUUID()
        node = app.nm.getByUUID(uuid)
        # Not ready until the storage answers StartOperation.
        app.setStorageNotReady(uuid)
        # XXX: what other values could happen ?
        if node.isRunning():
            # Tell the storage to start serving; the flag indicates whether
            # the cluster is in backup mode.
            conn.notify(Packets.StartOperation(bool(app.backup_tid)))

    def nodeLost(self, conn, node):
        """React to the loss of a storage node during operation.

        Outdate its cells, check that the partition table remains
        operational, forget its pending transaction locks and, in backup
        mode, notify the backup application.
        """
        logging.info('storage node lost')
        assert not node.isRunning(), node.getState()
        app = self.app
        app.broadcastPartitionChanges(app.pt.outdate(node))
        if not app.pt.operational():
            raise OperationFailure, 'cannot continue operation'
        app.tm.forget(conn.getUUID())
        if app.getClusterState() == ClusterStates.BACKINGUP:
            app.backup_app.nodeLost(node)
        if app.packing is not None:
            # The lost node cannot answer the pending pack request anymore:
            # account for it as a failed answer.
            self.answerPack(conn, False)

    def askUnfinishedTransactions(self, conn):
        """Answer the last TID and the list of pending transactions.

        In backup mode there is no pending transaction and the last tid
        is the minimum backup tid over the partition table.
        """
        app = self.app
        if app.backup_tid:
            last_tid = app.pt.getBackupTid(min)
            pending_list = ()
        else:
            last_tid = app.tm.getLastTID()
            pending_list = app.tm.registerForNotification(conn.getUUID())
        p = Packets.AnswerUnfinishedTransactions(last_tid, pending_list)
        conn.answer(p)

    def answerInformationLocked(self, conn, ttid):
        tm = self.app.tm
        if ttid not in tm:
            raise ProtocolError('Unknown transaction')
        # transaction locked on this storage node
        self.app.tm.lock(ttid, conn.getUUID())

    def notifyPartitionCorrupted(self, conn, partition, cell_list):
        """Mark the given cells of a partition as corrupted.

        Broadcast the change and abort operation if the partition table
        is no longer operational.
        """
        change_list = []
        for cell in self.app.pt.getCellList(partition):
            if cell.getUUID() in cell_list:
                cell.setState(CellStates.CORRUPTED)
                change_list.append((partition, cell.getUUID(),
                                    CellStates.CORRUPTED))
        self.app.broadcastPartitionChanges(change_list)
        if not self.app.pt.operational():
            raise OperationFailure('cannot continue operation')

    def notifyReplicationDone(self, conn, offset, tid):
        """A storage finished replicating a partition up to 'tid'."""
        app = self.app
        node = app.nm.getByUUID(conn.getUUID())
        if app.backup_tid:
            # In backup mode, the backup application decides what cell
            # changes (if any) must be broadcast.
            cell_list = app.backup_app.notifyReplicationDone(node, offset, tid)
            if not cell_list:
                return
        else:
            try:
                cell_list = self.app.pt.setUpToDate(node, offset)
                if not cell_list:
                    raise ProtocolError('Non-oudated partition')
            except PartitionTableException, e:
                raise ProtocolError(str(e))
        logging.debug("%s is up for offset %s", node, offset)
        self.app.broadcastPartitionChanges(cell_list)

    def answerPack(self, conn, status):
        """Collect pack answers; answer the client once all storages did."""
        app = self.app
        if app.packing is not None:
            client, msg_id, uid_set = app.packing
            uid_set.remove(conn.getUUID())
            if not uid_set:
                # Last expected answer: the pack is done cluster-wide.
                app.packing = None
                if not client.isClosed():
                    client.answer(Packets.AnswerPack(True), msg_id=msg_id)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/pt.py 0000664 0000000 0000000 00000031304 12601037530 0024146 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
import neo.lib.pt
from neo.lib.protocol import CellStates, ZERO_TID
class Cell(neo.lib.pt.Cell):
    """Master-side partition table cell.

    Adds replication bookkeeping on top of the generic cell:
    'replicating' (and, when set, 'backup_tid') track replication
    progress and are discarded when the cell stops being readable.
    """

    replicating = ZERO_TID

    def setState(self, state):
        was_readable = self.isReadable()
        super(Cell, self).setState(state)
        if was_readable and not self.isReadable():
            # The cell just lost its readable status: drop any
            # replication/backup progress attached to the instance.
            try:
                del self.backup_tid
                del self.replicating
            except AttributeError:
                pass
neo.lib.pt.Cell = Cell
class MappedNode(object):
    """Transparent proxy around a node, with a set of assigned partitions.

    Any attribute not defined here ('node', 'assigned') is delegated to
    the wrapped node object.
    """

    def __init__(self, node):
        self.node = node
        self.assigned = set()

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails:
        # forward to the wrapped node.
        return getattr(self.node, name)
class PartitionTable(neo.lib.pt.PartitionTable):
    """This class manages a partition table for the primary master node"""

    def setID(self, id):
        # 'id' may be None while the last PTID is still unknown (recovery).
        assert isinstance(id, (int, long)) or id is None, id
        self._id = id

    def setNextID(self):
        """Allocate and return the next Partition Table ID."""
        if self._id is None:
            raise RuntimeError, 'I do not know the last Partition Table ID'
        self._id += 1
        return self._id

    def make(self, node_list):
        """Make a new partition table from scratch."""
        # start with the first PTID
        self._id = 1
        # First, filter the list of nodes.
        node_list = [n for n in node_list if n.isRunning() \
                and n.getUUID() is not None]
        if len(node_list) == 0:
            # Impossible.
            raise RuntimeError, 'cannot make a partition table with an ' \
                    'empty storage node list'
        # Take it into account that the number of storage nodes may be less
        # than the number of replicas.
        repeats = min(self.nr + 1, len(node_list))
        index = 0
        # Assign nodes to partitions round-robin; count_dict tracks how
        # many cells each node handles.
        for offset in xrange(self.np):
            row = []
            for _ in xrange(repeats):
                node = node_list[index]
                row.append(Cell(node))
                self.count_dict[node] = self.count_dict.get(node, 0) + 1
                index += 1
                if index == len(node_list):
                    index = 0
            self.partition_list[offset] = row
        self.num_filled_rows = self.np

    def dropNodeList(self, node_list, simulate=False):
        """Drop the given nodes from the partition table.

        Return the list of cell changes, refusing to drop nodes that hold
        the only readable copies of a partition.  When 'simulate' is true,
        compute the changes without applying them.
        """
        partition_list = []
        change_list = []
        feeding_list = []
        for offset, row in enumerate(self.partition_list):
            new_row = []
            partition_list.append(new_row)
            feeding = None
            drop_readable = uptodate = False
            for cell in row:
                node = cell.getNode()
                if node in node_list:
                    change_list.append((offset, node.getUUID(),
                                        CellStates.DISCARDED))
                    if cell.isReadable():
                        drop_readable = True
                else:
                    new_row.append(cell)
                    if cell.isFeeding():
                        feeding = cell
                    elif cell.isUpToDate():
                        uptodate = True
            if feeding is not None:
                if len(new_row) < len(row):
                    # A cell of this partition is dropped: promote the
                    # feeding cell to up-to-date.
                    change_list.append((offset, feeding.getUUID(),
                                        CellStates.UP_TO_DATE))
                    feeding_list.append(feeding)
            elif drop_readable and not uptodate:
                raise neo.lib.pt.PartitionTableException(
                    "Refuse to drop nodes that contain the only readable"
                    " copies of partition %u" % offset)
        if not simulate:
            self.partition_list = partition_list
            for cell in feeding_list:
                cell.setState(CellStates.UP_TO_DATE)
                self.count_dict[cell.getNode()] += 1
            for node in node_list:
                self.count_dict.pop(node, None)
            self.num_filled_rows = len(filter(None, self.partition_list))
        return change_list

    def load(self, ptid, row_list, nm):
        """
        Load a partition table from a storage node during the recovery.
        Return the new storage nodes registered
        """
        # check offsets
        for offset, _row in row_list:
            if offset >= self.getPartitions():
                raise IndexError, offset
        # store the partition table
        self.clear()
        self._id = ptid
        new_nodes = []
        for offset, row in row_list:
            for uuid, state in row:
                node = nm.getByUUID(uuid)
                if node is None:
                    # Previously unknown storage node: register it.
                    node = nm.createStorage(uuid=uuid)
                    new_nodes.append(node.asTuple())
                self.setCell(offset, node, state)
        return new_nodes

    def setUpToDate(self, node, offset):
        """Set a cell as up-to-date"""
        uuid = node.getUUID()
        # check the partition is assigned and known as outdated
        for cell in self.getCellList(offset):
            if cell.getUUID() == uuid:
                if cell.isOutOfDate():
                    break
                # Cell exists but is not out-of-date: nothing to change.
                return
        else:
            raise neo.lib.pt.PartitionTableException('Non-assigned partition')
        # update the partition table
        cell_list = [self.setCell(offset, node, CellStates.UP_TO_DATE)]
        # If the partition contains a feeding cell, drop it now.
        for feeding_cell in self.getCellList(offset):
            if feeding_cell.isFeeding():
                cell_list.append(self.removeCell(offset,
                    feeding_cell.getNode()))
                break
        return cell_list

    def addNodeList(self, node_list):
        """Add nodes"""
        # Only register nodes not yet in the table; they get cells later
        # (e.g. via tweak()).
        added_list = []
        for node in node_list:
            if node not in self.count_dict:
                self.count_dict[node] = 0
                added_list.append(node)
        return added_list

    def tweak(self, drop_list=()):
        """Optimize partition table

        This is done by computing a minimal diff between current partition table
        and what make() would do.
        """
        # Current assignment: node -> {offset -> cell}, and per-partition
        # sets of readable cells.
        assigned_dict = {x: {} for x in self.count_dict}
        readable_list = [set() for x in xrange(self.np)]
        for offset, row in enumerate(self.partition_list):
            for cell in row:
                if cell.isReadable():
                    readable_list[offset].add(cell)
                assigned_dict[cell.getNode()][offset] = cell
        # Build the "ideal" table with make() on proxy nodes, ignoring
        # nodes that must be dropped.
        pt = PartitionTable(self.np, self.nr)
        drop_list = set(drop_list).intersection(assigned_dict)
        node_set = {MappedNode(x) for x in assigned_dict
                    if x not in drop_list}
        pt.make(node_set)
        for offset, row in enumerate(pt.partition_list):
            for cell in row:
                if cell.isReadable():
                    cell.getNode().assigned.add(offset)
        def map_nodes():
            # Pair each real node with the ideal-table proxy whose
            # assignment is closest, to minimize the diff.
            node_list = []
            for node, assigned in assigned_dict.iteritems():
                if node in drop_list:
                    yield node, frozenset()
                    continue
                readable = {offset for offset, cell in assigned.iteritems()
                            if cell.isReadable()}
                # the criterion on UUID is purely cosmetic
                node_list.append((len(readable), len(assigned),
                                  -node.getUUID(), readable, node))
            node_list.sort(reverse=1)
            for _, _, _, readable, node in node_list:
                assigned = assigned_dict[node]
                mapped = min(node_set, key=lambda m: (
                    len(m.assigned.symmetric_difference(assigned)),
                    len(m.assigned ^ readable)))
                node_set.remove(mapped)
                yield node, mapped.assigned
            assert not node_set
        changed_list = []
        uptodate_set = set()
        remove_dict = defaultdict(list)
        # Apply the diff between current and mapped assignments.
        for node, mapped in map_nodes():
            uuid = node.getUUID()
            assigned = assigned_dict[node]
            for offset, cell in assigned.iteritems():
                if offset in mapped:
                    # Cell kept: promote feeding cells to up-to-date.
                    if cell.isReadable():
                        uptodate_set.add(offset)
                        readable_list[offset].remove(cell)
                        if cell.isFeeding():
                            self.count_dict[node] += 1
                            state = CellStates.UP_TO_DATE
                            cell.setState(state)
                            changed_list.append((offset, uuid, state))
                else:
                    # Cell no longer wanted: schedule removal.
                    if not cell.isFeeding():
                        self.count_dict[node] -= 1
                    remove_dict[offset].append(cell)
            for offset in mapped.difference(assigned):
                # New cell: starts out-of-date, to be replicated.
                self.count_dict[node] += 1
                state = CellStates.OUT_OF_DATE
                self.partition_list[offset].append(Cell(node, state))
                changed_list.append((offset, uuid, state))
        count_dict = self.count_dict.copy()
        for offset, cell_list in remove_dict.iteritems():
            row = self.partition_list[offset]
            # If no kept cell is readable, keep one removed readable cell
            # as feeding until replication completes.
            feeding = None if offset in uptodate_set else min(
                readable_list[offset], key=lambda x: count_dict[x.getNode()])
            for cell in cell_list:
                if cell is feeding:
                    count_dict[cell.getNode()] += 1
                    if cell.isFeeding():
                        continue
                    state = CellStates.FEEDING
                    cell.setState(state)
                else:
                    state = CellStates.DISCARDED
                    row.remove(cell)
                changed_list.append((offset, cell.getUUID(), state))
        assert self.num_filled_rows == len(filter(None, self.partition_list))
        return changed_list

    def outdate(self, lost_node=None):
        """Outdate all non-working nodes

        Do not outdate cells of 'lost_node' for partitions it was the last node
        to serve. This allows a cluster restart.
        """
        change_list = []
        for offset, row in enumerate(self.partition_list):
            lost = lost_node
            cell_list = []
            for cell in row:
                if cell.isReadable():
                    if cell.getNode().isRunning():
                        # At least one running readable cell: 'lost_node'
                        # is not the last server of this partition.
                        lost = None
                    else :
                        cell_list.append(cell)
            for cell in cell_list:
                if cell.getNode() is not lost:
                    cell.setState(CellStates.OUT_OF_DATE)
                    change_list.append((offset, cell.getUUID(),
                                        CellStates.OUT_OF_DATE))
        return change_list

    def iterNodeCell(self, node):
        # Yield (offset, cell) for each partition assigned to 'node'.
        for offset, row in enumerate(self.partition_list):
            for cell in row:
                if cell.getNode() is node:
                    yield offset, cell
                    break

    def getReadableCellNodeSet(self):
        """
        Return a set of all nodes which are part of at least one UP TO DATE
        partition.
        """
        return {cell.getNode()
                for row in self.partition_list
                for cell in row
                if cell.isReadable()}

    def clearReplicating(self):
        # Drop per-cell replication progress (instance attribute only).
        for row in self.partition_list:
            for cell in row:
                try:
                    del cell.replicating
                except AttributeError:
                    pass

    def setBackupTidDict(self, backup_tid_dict):
        # Record, per readable cell, the backup tid reported by its node.
        for row in self.partition_list:
            for cell in row:
                if cell.isReadable():
                    cell.backup_tid = backup_tid_dict.get(cell.getUUID(),
                                                          ZERO_TID)

    def getBackupTid(self, mean=max):
        """Return the backup tid of the whole table: the minimum over all
        partitions of 'mean' (max by default) over readable cells."""
        try:
            return min(mean(x.backup_tid for x in row if x.isReadable())
                       for row in self.partition_list)
        except ValueError:
            return ZERO_TID

    def getCheckTid(self, partition_list):
        # Minimum backup tid over the readable cells of the given
        # partitions; ZERO_TID if some partition has no readable cell.
        try:
            return min(min(cell.backup_tid
                           for cell in self.partition_list[offset]
                           if cell.isReadable())
                       for offset in partition_list)
        except ValueError:
            return ZERO_TID
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/recovery.py 0000664 0000000 0000000 00000013246 12601037530 0025366 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from neo.lib import logging
from neo.lib.util import dump
from neo.lib.protocol import Packets, ProtocolError, ClusterStates, NodeStates
from neo.lib.protocol import ZERO_OID
from .handlers import MasterHandler
class RecoveryManager(MasterHandler):
    """
    Manage the cluster recovery
    """

    def __init__(self, app):
        # NOTE(review): 'app' is neither stored nor passed to the base
        # class here; self.app is presumably set up by other means --
        # confirm against MasterHandler/EventHandler.
        # The target node's uuid to request next.
        # NOTE(review): despite the comment above, this actually holds the
        # highest partition table ID seen so far -- confirm.
        self.target_ptid = None
        # uuid -> backup_tid reported in AnswerLastIDs
        self.backup_tid_dict = {}

    def getHandler(self):
        return self

    def identifyStorageNode(self, _):
        """
        Returns the handler for storage nodes
        """
        return NodeStates.PENDING, self

    def run(self):
        """
        Recover the status about the cluster. Obtain the last OID, the last
        TID, and the last Partition Table ID from storage nodes, then get
        back the latest partition table or make a new table from scratch,
        if this is the first time.
        """
        logging.info('begin the recovery of the status')
        app = self.app
        pt = app.pt
        app.changeClusterState(ClusterStates.RECOVERING)
        pt.setID(None)
        # collect the last partition table available
        poll = app.em.poll
        while 1:
            poll(1)
            if pt.filled():
                # A partition table exists, we are starting an existing
                # cluster.
                node_list = pt.getReadableCellNodeSet()
                if app._startup_allowed:
                    node_list = [node for node in node_list if node.isPending()]
                elif not all(node.isPending() for node in node_list):
                    # Some required node is not pending yet: keep waiting.
                    continue
            elif app._startup_allowed or app.autostart:
                # No partition table and admin allowed startup, we are
                # creating a new cluster out of all pending nodes.
                node_list = app.nm.getStorageList(only_identified=True)
                if not app._startup_allowed and len(node_list) < app.autostart:
                    continue
            else:
                continue
            if node_list and not any(node.getConnection().isPending()
                                     for node in node_list):
                # All answers received: recovery data is complete.
                if pt.filled():
                    node_list = pt.getConnectedNodeList()
                break
        logging.info('startup allowed')
        for node in node_list:
            assert node.isPending(), node
            node.setRunning()
        app.broadcastNodesInformation(node_list)
        if pt.getID() is None:
            logging.info('creating a new partition table')
            # reset IDs generators & build new partition with running nodes
            app.tm.setLastOID(ZERO_OID)
            pt.make(node_list)
            self._broadcastPartitionTable(pt.getID(), pt.getRowList())
        elif app.backup_tid:
            # Resuming a backup cluster: restore backup progress.
            pt.setBackupTidDict(self.backup_tid_dict)
            app.backup_tid = pt.getBackupTid()
        app.setLastTransaction(app.tm.getLastTID())
        logging.debug('cluster starts with loid=%s and this partition table :',
                      dump(app.tm.getLastOID()))
        pt.log()

    def connectionLost(self, conn, new_state):
        node = self.app.nm.getByUUID(conn.getUUID())
        assert node is not None
        if node.getState() == new_state:
            return
        node.setState(new_state)
        # broadcast to all so that admin nodes gets informed
        self.app.broadcastNodesInformation([node])

    def connectionCompleted(self, conn):
        # ask the last IDs to perform the recovery
        conn.ask(Packets.AskLastIDs())

    def answerLastIDs(self, conn, loid, ltid, lptid, backup_tid):
        # Get max values.
        if loid is not None:
            self.app.tm.setLastOID(loid)
        if ltid is not None:
            self.app.tm.setLastTID(ltid)
        # Note: target_ptid may still be None; in Python 2 any value
        # compares greater than None.
        if lptid > self.target_ptid:
            # something newer
            self.target_ptid = lptid
            conn.ask(Packets.AskPartitionTable())
        self.backup_tid_dict[conn.getUUID()] = backup_tid

    def answerPartitionTable(self, conn, ptid, row_list):
        if ptid != self.target_ptid:
            # If this is not from a target node, ignore it.
            logging.warn('Got %s while waiting %s', dump(ptid),
                         dump(self.target_ptid))
        else:
            self._broadcastPartitionTable(ptid, row_list)
            self.app.backup_tid = self.backup_tid_dict[conn.getUUID()]

    def _broadcastPartitionTable(self, ptid, row_list):
        """Load the given table and push it to the admin nodes."""
        try:
            new_nodes = self.app.pt.load(ptid, row_list, self.app.nm)
        except IndexError:
            raise ProtocolError('Invalid offset')
        else:
            notification = Packets.NotifyNodeInformation(new_nodes)
            ptid = self.app.pt.getID()
            row_list = self.app.pt.getRowList()
            partition_table = Packets.SendPartitionTable(ptid, row_list)
            # notify the admin nodes
            for node in self.app.nm.getAdminList(only_identified=True):
                node.notify(notification)
                node.notify(partition_table)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/transactions.py 0000664 0000000 0000000 00000031303 12601037530 0026232 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from time import time
from struct import pack, unpack
from neo.lib import logging
from neo.lib.protocol import ProtocolError, uuid_str, ZERO_TID
from neo.lib.util import dump, u64, addTID, tidFromTime
class DelayedError(Exception):
    """Marker exception.

    NOTE(review): no raiser is visible in this module; presumably used to
    delay the processing of a request -- confirm against callers.
    """
class Transaction(object):
    """
    A pending transaction
    """
    # final TID, allocated by prepare()
    _tid = None
    # packet id of the client's finish request, to use in the answer
    _msg_id = None
    _oid_list = None
    _prepared = False
    # sets of storage uuids: all participants, and those whose lock
    # confirmation is still awaited
    _uuid_set = None
    _lock_wait_uuid_set = None

    def __init__(self, node, ttid):
        """
        Prepare the transaction, set OIDs and UUIDs related to it
        """
        self._node = node
        self._ttid = ttid
        # creation time, for the 'age' shown by __repr__
        self._birth = time()
        # store storage uuids that must be notified at commit
        self._notification_set = set()

    def __repr__(self):
        return "<%s(client=%r, tid=%r, oids=%r, storages=%r, age=%.2fs) at %x>" % (
            self.__class__.__name__,
            self._node,
            dump(self._tid),
            map(dump, self._oid_list or ()),
            map(uuid_str, self._uuid_set or ()),
            time() - self._birth,
            id(self),
        )

    def getNode(self):
        """
        Return the node that began the transaction
        """
        return self._node

    def getTTID(self):
        """
        Return the temporary transaction ID.
        """
        return self._ttid

    def getTID(self):
        """
        Return the transaction ID
        """
        return self._tid

    def getMessageId(self):
        """
        Returns the packet ID to use in the answer
        """
        return self._msg_id

    def getUUIDList(self):
        """
        Returns the list of node's UUID that lock the transaction
        """
        return list(self._uuid_set)

    def getOIDList(self):
        """
        Returns the list of OIDs used in the transaction
        """
        return list(self._oid_list)

    def isPrepared(self):
        """
        Returns True if the commit has been requested by the client
        """
        return self._prepared

    def registerForNotification(self, uuid):
        """
        Register a storage node that requires a notification at commit
        """
        self._notification_set.add(uuid)

    def getNotificationUUIDList(self):
        """
        Returns the list of storage waiting for the transaction to be
        finished
        """
        return list(self._notification_set)

    def prepare(self, tid, oid_list, uuid_list, msg_id):
        # Record the final tid and start waiting for all involved
        # storages to lock.
        self._tid = tid
        self._oid_list = oid_list
        self._msg_id = msg_id
        self._uuid_set = set(uuid_list)
        self._lock_wait_uuid_set = set(uuid_list)
        self._prepared = True

    def forget(self, uuid):
        """
        Given storage was lost while waiting for its lock, stop waiting
        for it.
        Does nothing if the node was not part of the transaction.
        """
        # XXX: We might lose information that a storage successfully locked
        # data but was later found to be disconnected. This loss has no impact
        # on current code, but it might be disturbing to reader or future code.
        if self._prepared:
            self._lock_wait_uuid_set.discard(uuid)
            self._uuid_set.discard(uuid)
            return self.locked()
        return False

    def lock(self, uuid):
        """
        Define that a node has locked the transaction
        Returns true if all nodes are locked
        """
        # KeyError here means an unexpected or duplicate lock notification.
        self._lock_wait_uuid_set.remove(uuid)
        return self.locked()

    def locked(self):
        """
        Returns true if all nodes are locked
        """
        return not self._lock_wait_uuid_set
class TransactionManager(object):
    """
    Manage current transactions
    """
    _last_tid = ZERO_TID

    def __init__(self, on_commit):
        # ttid -> transaction
        self._ttid_dict = {}
        # node -> transactions mapping
        self._node_dict = {}
        self._last_oid = None
        # callback invoked once a transaction is fully locked
        self._on_commit = on_commit
        # queue filled with ttids pointing to transactions with increasing tids
        self._queue = []

    def __getitem__(self, ttid):
        """
        Return the transaction object for this TID
        """
        # XXX: used by unit tests only
        return self._ttid_dict[ttid]

    def __contains__(self, ttid):
        """
        Returns True if this is a pending transaction
        """
        return ttid in self._ttid_dict

    def getNextOIDList(self, num_oids):
        """ Generate a new OID list """
        if self._last_oid is None:
            raise RuntimeError, 'I do not know the last OID'
        # OIDs are 8-byte big-endian integers; allocate sequentially.
        oid = unpack('!Q', self._last_oid)[0] + 1
        oid_list = [pack('!Q', oid + i) for i in xrange(num_oids)]
        self._last_oid = oid_list[-1]
        return oid_list

    def setLastOID(self, oid):
        # Keep the maximum, so a smaller value never rewinds the counter.
        self._last_oid = max(oid, self._last_oid)

    def getLastOID(self):
        return self._last_oid

    def _nextTID(self, ttid=None, divisor=None):
        """
        Compute the next TID based on the current time and check collisions.
        Also, if ttid is not None, divisor is mandatory; adjust the result
        so that
            tid % divisor == ttid % divisor
        while preserving
            min_tid < tid
        If ttid is None, divisor is ignored.
        When constraints allow, prefer decreasing generated TID, to avoid
        fast-forwarding to future dates.
        """
        tid = tidFromTime(time())
        min_tid = self._last_tid
        if tid <= min_tid:
            tid = addTID(min_tid, 1)
            # We know we won't have room to adjust by decreasing.
            try_decrease = False
        else:
            try_decrease = True
        if ttid is not None:
            assert isinstance(ttid, basestring), repr(ttid)
            assert isinstance(divisor, (int, long)), repr(divisor)
            ref_remainder = u64(ttid) % divisor
            remainder = u64(tid) % divisor
            if ref_remainder != remainder:
                if try_decrease:
                    # Move backwards to the closest tid with the wanted
                    # remainder; step forward by 'divisor' if that went
                    # at or below min_tid.
                    new_tid = addTID(tid, ref_remainder - divisor - remainder)
                    assert u64(new_tid) % divisor == ref_remainder, (dump(new_tid),
                        ref_remainder)
                    if new_tid <= min_tid:
                        new_tid = addTID(new_tid, divisor)
                else:
                    # Can only move forwards.
                    if ref_remainder > remainder:
                        ref_remainder += divisor
                    new_tid = addTID(tid, ref_remainder - remainder)
                assert min_tid < new_tid, (dump(min_tid), dump(tid), dump(new_tid))
                tid = new_tid
        self._last_tid = tid
        return self._last_tid

    def getLastTID(self):
        """
        Returns the last TID used
        """
        return self._last_tid

    def setLastTID(self, tid):
        """
        Set the last TID, keep the previous if lower
        """
        self._last_tid = max(self._last_tid, tid)

    def reset(self):
        """
        Discard all manager content
        This doesn't reset the last TID.
        """
        self._ttid_dict = {}
        self._node_dict = {}

    def hasPending(self):
        """
        Returns True if some transactions are pending
        """
        return bool(self._ttid_dict)

    def registerForNotification(self, uuid):
        """
        Return the list of pending transaction IDs
        """
        # remember that this node must be notified when pending transactions
        # will be finished
        for txn in self._ttid_dict.itervalues():
            txn.registerForNotification(uuid)
        return self._ttid_dict.keys()

    def begin(self, node, tid=None):
        """
        Generate a new TID
        """
        if tid is None:
            # No TID requested, generate a temporary one
            ttid = self._nextTID()
        else:
            # Use of specific TID requested, queue it immediately and update
            # last TID.
            self._queue.append((node.getUUID(), tid))
            self.setLastTID(tid)
            ttid = tid
        txn = Transaction(node, ttid)
        self._ttid_dict[ttid] = txn
        self._node_dict.setdefault(node, {})[ttid] = txn
        logging.debug('Begin %s', txn)
        return ttid

    def prepare(self, ttid, divisor, oid_list, uuid_list, msg_id):
        """
        Prepare a transaction to be finished
        """
        # XXX: not efficient but the list should be often small
        try:
            txn = self._ttid_dict[ttid]
        except KeyError:
            raise ProtocolError("unknown ttid %s" % dump(ttid))
        node = txn.getNode()
        # If the ttid was queued at begin() (explicit tid), reuse it as
        # final tid; otherwise allocate one and queue now.
        for _, tid in self._queue:
            if ttid == tid:
                break
        else:
            tid = self._nextTID(ttid, divisor)
            self._queue.append((node.getUUID(), ttid))
        logging.debug('Finish TXN %s for %s (was %s)',
            dump(tid), node, dump(ttid))
        txn.prepare(tid, oid_list, uuid_list, msg_id)
        # check if greater and foreign OID was stored
        if oid_list:
            self.setLastOID(max(oid_list))
        return tid

    def remove(self, uuid, ttid):
        """
        Remove a transaction, commited or aborted
        """
        logging.debug('Remove TXN %s', dump(ttid))
        try:
            # only in case of an import:
            self._queue.remove((uuid, ttid))
        except ValueError:
            # finish might not have been started
            pass
        ttid_dict = self._ttid_dict
        if ttid in ttid_dict:
            txn = ttid_dict[ttid]
            node = txn.getNode()
            # ...and tried to finish
            del ttid_dict[ttid]
            del self._node_dict[node][ttid]

    def lock(self, ttid, uuid):
        """
        Set that a node has locked the transaction.
        If transaction is completely locked, calls function given at
        instanciation time.
        """
        logging.debug('Lock TXN %s for %s', dump(ttid), uuid_str(uuid))
        assert ttid in self._ttid_dict, "Transaction not started"
        txn = self._ttid_dict[ttid]
        # Only unlock when the fully-locked transaction is at the head of
        # the commit queue, to preserve tid ordering.
        if txn.lock(uuid) and self._queue[0][1] == ttid:
            # all storage are locked and we unlock the commit queue
            self._unlockPending()

    def forget(self, uuid):
        """
        A storage node has been lost, don't expect a reply from it for
        current transactions
        """
        unlock = False
        # iterate over a copy because _unlockPending may alter the dict
        for ttid, txn in self._ttid_dict.items():
            if txn.forget(uuid) and self._queue[0][1] == ttid:
                unlock = True
        if unlock:
            self._unlockPending()

    def _unlockPending(self):
        # unlock pending transactions
        # Pop fully-locked transactions from the head of the queue in
        # order, committing each; stop at the first one still pending.
        queue = self._queue
        pop = queue.pop
        insert = queue.insert
        on_commit = self._on_commit
        get = self._ttid_dict.get
        while queue:
            uuid, ttid = pop(0)
            txn = get(ttid, None)
            # _queue can contain un-prepared transactions
            if txn is not None and txn.locked():
                on_commit(txn)
            else:
                # Not ready: put it back and stop.
                insert(0, (uuid, ttid))
                break

    def abortFor(self, node):
        """
        Abort pending transactions initiated by a node
        """
        logging.debug('Abort TXN for %s', node)
        uuid = node.getUUID()
        # XXX: this loop is useful only during an import
        for nuuid, ntid in list(self._queue):
            if nuuid == uuid:
                self._queue.remove((uuid, ntid))
        if node in self._node_dict:
            # remove transactions
            remove = self.remove
            for ttid in self._node_dict[node].keys():
                if not self._ttid_dict[ttid].isPrepared():
                    remove(uuid, ttid)
            # discard node entry
            del self._node_dict[node]

    def log(self):
        logging.info('Transactions:')
        for txn in self._ttid_dict.itervalues():
            logging.info('  %r', txn)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/master/verification.py 0000664 0000000 0000000 00000020013 12601037530 0026200 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from neo.lib import logging
from neo.lib.util import dump
from neo.lib.protocol import ClusterStates, Packets, NodeStates
from .handlers import BaseServiceHandler
class VerificationFailure(Exception):
    """
    Exception raised each time the cluster integrity check failed.
      - A required storage node is missing
      - A transaction or an object is missing on a node
    """
    pass
class VerificationManager(BaseServiceHandler):
"""
Manager for verification step of a NEO cluster:
- Wait for at least one available storage per partition
- Check if all expected content is present
"""
    def __init__(self, app):
        # NOTE(review): 'app' is neither stored nor passed to the base
        # class here; self.app is presumably set up by other means --
        # confirm against BaseServiceHandler.
        # OIDs of the transaction currently being verified
        self._oid_set = set()
        # unfinished transaction IDs gathered from storages
        self._tid_set = set()
        # uuids of storages whose answer is still awaited
        self._uuid_set = set()
        self._object_present = False
    def _askStorageNodesAndWait(self, packet, node_list):
        """Send 'packet' to every node of 'node_list' and poll until all
        answered.

        Raises VerificationFailure as soon as the partition table stops
        being operational while waiting.
        """
        poll = self.app.em.poll
        operational = self.app.pt.operational
        uuid_set = self._uuid_set
        uuid_set.clear()
        for node in node_list:
            uuid_set.add(node.getUUID())
            node.ask(packet)
        # Answer handlers remove uuids via _gotAnswerFrom().
        while True:
            poll(1)
            if not operational():
                raise VerificationFailure
            if not uuid_set:
                break
def _gotAnswerFrom(self, uuid):
"""
Returns True if answer from given uuid is waited upon by
_askStorageNodesAndWait, False otherwise.
Also, mark this uuid as having answered, so it stops being waited upon
by _askStorageNodesAndWait.
"""
try:
self._uuid_set.remove(uuid)
except KeyError:
result = False
else:
result = True
return result
    def getHandler(self):
        """The manager is its own event handler."""
        return self
def identifyStorageNode(self, known):
"""
Returns the handler to manager the given node
"""
if known:
state = NodeStates.RUNNING
else:
# if node is unknown, it has been forget when the current
# partition was validated by the admin
# Here the uuid is not cleared to allow lookup pending nodes by
# uuid from the test framework. It's safe since nodes with a
# conflicting UUID are rejected in the identification handler.
state = NodeStates.PENDING
return state, self
    def run(self):
        """Run the verification phase, retrying until it succeeds, then
        outdate the cells of non-working nodes."""
        self.app.changeClusterState(ClusterStates.VERIFYING)
        while True:
            try:
                self.verifyData()
            except VerificationFailure:
                # Cluster integrity changed under us: start over.
                continue
            break
        # At this stage, all non-working nodes are out-of-date.
        self.app.broadcastPartitionChanges(self.app.pt.outdate())
def verifyData(self):
"""Verify the data in storage nodes and clean them up, if necessary."""
app = self.app
# wait for any missing node
logging.debug('waiting for the cluster to be operational')
while not app.pt.operational():
app.em.poll(1)
if app.backup_tid:
return
logging.info('start to verify data')
getIdentifiedList = app.nm.getIdentifiedList
# Gather all unfinished transactions.
self._askStorageNodesAndWait(Packets.AskUnfinishedTransactions(),
[x for x in getIdentifiedList() if x.isStorage()])
# Gather OIDs for each unfinished TID, and verify whether the
# transaction can be finished or must be aborted. This could be
# in parallel in theory, but not so easy. Thus do it one-by-one
# at the moment.
for tid in self._tid_set:
uuid_set = self.verifyTransaction(tid)
if uuid_set is None:
packet = Packets.DeleteTransaction(tid, self._oid_set or [])
# Make sure that no node has this transaction.
for node in getIdentifiedList():
if node.isStorage():
node.notify(packet)
else:
packet = Packets.CommitTransaction(tid)
for node in getIdentifiedList(pool_set=uuid_set):
node.notify(packet)
self._oid_set = set()
# If possible, send the packets now.
app.em.poll(0)
def verifyTransaction(self, tid):
nm = self.app.nm
uuid_set = set()
# Determine to which nodes I should ask.
partition = self.app.pt.getPartition(tid)
uuid_list = [cell.getUUID() for cell \
in self.app.pt.getCellList(partition, readable=True)]
if len(uuid_list) == 0:
raise VerificationFailure
uuid_set.update(uuid_list)
# Gather OIDs.
node_list = self.app.nm.getIdentifiedList(pool_set=uuid_list)
if len(node_list) == 0:
raise VerificationFailure
self._askStorageNodesAndWait(Packets.AskTransactionInformation(tid),
node_list)
if self._oid_set is None or len(self._oid_set) == 0:
# Not commitable.
return None
# Verify that all objects are present.
for oid in self._oid_set:
partition = self.app.pt.getPartition(oid)
object_uuid_list = [cell.getUUID() for cell \
in self.app.pt.getCellList(partition, readable=True)]
if len(object_uuid_list) == 0:
raise VerificationFailure
uuid_set.update(object_uuid_list)
self._object_present = True
self._askStorageNodesAndWait(Packets.AskObjectPresent(oid, tid),
nm.getIdentifiedList(pool_set=object_uuid_list))
if not self._object_present:
# Not commitable.
return None
return uuid_set
def answerUnfinishedTransactions(self, conn, max_tid, tid_list):
uuid = conn.getUUID()
logging.info('got unfinished transactions %s from %r',
map(dump, tid_list), conn)
if not self._gotAnswerFrom(uuid):
return
self._tid_set.update(tid_list)
def answerTransactionInformation(self, conn, tid,
user, desc, ext, packed, oid_list):
uuid = conn.getUUID()
if not self._gotAnswerFrom(uuid):
return
oid_set = set(oid_list)
if self._oid_set is None:
# Someone does not agree.
pass
elif len(self._oid_set) == 0:
# This is the first answer.
self._oid_set.update(oid_set)
elif self._oid_set != oid_set:
raise ValueError, "Inconsistent transaction %s" % \
(dump(tid, ))
def tidNotFound(self, conn, message):
uuid = conn.getUUID()
logging.info('TID not found: %s', message)
if not self._gotAnswerFrom(uuid):
return
self._oid_set = None
def answerObjectPresent(self, conn, oid, tid):
uuid = conn.getUUID()
logging.info('object %s:%s found', dump(oid), dump(tid))
self._gotAnswerFrom(uuid)
def oidNotFound(self, conn, message):
uuid = conn.getUUID()
logging.info('OID not found: %s', message)
if not self._gotAnswerFrom(uuid):
return
self.app._object_present = False
def connectionCompleted(self, conn):
pass
def nodeLost(self, conn, node):
if not self.app.pt.operational():
raise VerificationFailure, 'cannot continue verification'
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/neoctl/ 0000775 0000000 0000000 00000000000 12601037530 0023141 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/neoctl/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0025240 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/neoctl/app.py 0000664 0000000 0000000 00000024770 12601037530 0024305 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from operator import itemgetter
from .neoctl import NeoCTL, NotReadyException
from neo.lib.util import p64, u64, tidFromTime
from neo.lib.protocol import uuid_str, ClusterStates, NodeTypes, \
UUID_NAMESPACES, ZERO_TID
# Maps each command word (and sub-word) of the neoctl command line to the
# name of the TerminalNeoCTL method implementing it.  Nested dicts stand
# for multi-word commands ("print pt", "set cluster", ...).
action_dict = {
    'print': {
        'ids': 'getLastIds',
        'pt': 'getPartitionRowList',
        'node': 'getNodeList',
        'cluster': 'getClusterState',
        'primary': 'getPrimary',
    },
    'set': {
        'cluster': 'setClusterState',
    },
    'check': 'checkReplicas',
    'start': 'startCluster',
    'add': 'enableStorageList',
    'tweak': 'tweakPartitionTable',
    'drop': 'dropNode',
    'kill': 'killNode',
}

# Parse a printable node identifier (e.g. "S1") back into an integer UUID:
# the namespace of the first letter gives the high byte, the rest is the
# node number.  The outer lambda is applied immediately to build the
# {letter: namespace} lookup table exactly once.
uuid_int = (lambda ns: lambda uuid:
    (ns[uuid[0]] << 24) + int(uuid[1:])
)({str(k)[0]: v for k, v in UUID_NAMESPACES.iteritems()})
class TerminalNeoCTL(object):
    """Glue between the neoctl command line and the NeoCTL client:
    converts argument strings, calls NeoCTL and formats the results.

    Note: the docstrings of the action methods below are displayed
    verbatim by Application._usage, so they double as user help.
    """

    def __init__(self, address):
        self.neoctl = NeoCTL(address)

    def __del__(self):
        self.neoctl.close()

    # Utility methods (could be functions)

    def asNodeType(self, value):
        # e.g. 'storage' -> NodeTypes.STORAGE (AttributeError if unknown)
        return getattr(NodeTypes, value.upper())

    def asClusterState(self, value):
        return getattr(ClusterStates, value.upper())

    def asTID(self, value):
        # A value containing a dot is a UNIX timestamp; anything else is
        # an integer TID, in any base understood by int(x, 0).
        if '.' in value:
            return tidFromTime(float(value))
        return p64(int(value, 0))

    asNode = staticmethod(uuid_int)

    def formatRowList(self, row_list):
        # One line per partition: "offset | uuid - state | uuid - state |"
        return '\n'.join('%03d | %s' % (offset,
            ''.join('%s - %s |' % (uuid_str(uuid), state)
            for (uuid, state) in cell_list))
            for (offset, cell_list) in row_list)

    def formatNodeList(self, node_list, _sort_key=itemgetter(2, 0, 1)):
        if not node_list:
            return 'Empty list!'
        # Sort by uuid, then type and address, for a stable display.
        node_list.sort(key=_sort_key)
        return '\n'.join(
            '%s - %s - %s - %s' % (node_type, uuid_str(uuid),
            address and '%s:%s' % address, state)
            for node_type, address, uuid, state in node_list)

    # Actual actions

    def getLastIds(self, params):
        """
        Get last ids.
        """
        assert not params
        r = self.neoctl.getLastIds()
        if r[3]:
            # NOTE(review): r[3] appears to be the backup tid -- when set,
            # only the last replicated transaction is shown.  Confirm
            # against AnswerLastIDs.
            return "last_tid = 0x%x" % u64(self.neoctl.getLastTransaction())
        return "last_oid = 0x%x\nlast_tid = 0x%x\nlast_ptid = %u" % (
            u64(r[0]), u64(r[1]), r[2])

    def getPartitionRowList(self, params):
        """
        Get a list of partition rows, bounded by min & max and involving
        given node.
        Parameters: [min [max [node]]]
        min: offset of the first row to fetch (starts at 0)
        max: offset of the last row to fetch (0 for no limit)
        node: filters the list of nodes serving a line to this node
        """
        # Pad missing optional parameters with their default values.
        params = params + [0, 0, None][len(params):]
        min_offset, max_offset, node = params
        min_offset = int(min_offset)
        max_offset = int(max_offset)
        if node is not None:
            node = self.asNode(node)
        ptid, row_list = self.neoctl.getPartitionRowList(
            min_offset=min_offset, max_offset=max_offset, node=node)
        # TODO: return ptid
        return self.formatRowList(row_list)

    def getNodeList(self, params):
        """
        Get a list of nodes, filtering with given type.
        Parameters: [type]
        type: type of node to display
        """
        assert len(params) < 2
        if len(params):
            node_type = self.asNodeType(params[0])
        else:
            node_type = None
        node_list = self.neoctl.getNodeList(node_type=node_type)
        return self.formatNodeList(node_list)

    def getClusterState(self, params):
        """
        Get cluster state.
        """
        assert len(params) == 0
        return str(self.neoctl.getClusterState())

    def setClusterState(self, params):
        """
        Set cluster state.
        Parameters: state
        state: state to put the cluster in
        """
        assert len(params) == 1
        return self.neoctl.setClusterState(self.asClusterState(params[0]))

    def startCluster(self, params):
        """
        Starts cluster operation after a startup.
        Equivalent to:
          set cluster verifying
        """
        assert len(params) == 0
        return self.neoctl.startCluster()

    def enableStorageList(self, params):
        """
        Enable cluster to make use of pending storages.
        Parameters: all
                    node [node [...]]
        node: if "all", add all pending storage nodes.
              otherwise, the list of storage nodes to enable.
        """
        if len(params) == 1 and params[0] == 'all':
            node_list = self.neoctl.getNodeList(NodeTypes.STORAGE)
            uuid_list = [node[2] for node in node_list]
        else:
            uuid_list = map(self.asNode, params)
        return self.neoctl.enableStorageList(uuid_list)

    def tweakPartitionTable(self, params):
        """
        Optimize partition table.
        No partition will be assigned to specified storage nodes.
        Parameters: [node [...]]
        """
        return self.neoctl.tweakPartitionTable(map(self.asNode, params))

    def killNode(self, params):
        """
        Kill redundant nodes (either a storage or a secondary master).
        Parameters: node
        """
        return self.neoctl.killNode(self.asNode(*params))

    def dropNode(self, params):
        """
        Remove storage node permanently.
        Parameters: node
        """
        return self.neoctl.dropNode(self.asNode(*params))

    def getPrimary(self, params):
        """
        Get primary master node.
        """
        return uuid_str(self.neoctl.getPrimary())

    def checkReplicas(self, params):
        """
        Test whether partitions have corrupted metadata
        Any corrupted cell is put in CORRUPTED state, possibly make the
        cluster non operational.
        Parameters: [partition]:[reference] ... [min_tid [max_tid]]
        reference: node id of a storage with known good data
        If not given, and if the cluster is in backup mode, an upstream
        cell is automatically taken as reference.
        """
        partition_dict = {}
        params = iter(params)
        min_tid = ZERO_TID
        max_tid = None
        for p in params:
            try:
                partition, source = p.split(':')
            except ValueError:
                # Not a "partition:reference" pair: the remaining
                # arguments are [min_tid [max_tid]].
                min_tid = self.asTID(p)
                try:
                    max_tid = self.asTID(params.next())
                except StopIteration:
                    pass
                break
            source = self.asNode(source) if source else None
            if partition:
                partition_dict[int(partition)] = source
            else:
                # An empty partition part means "all partitions".
                assert not partition_dict
                np = len(self.neoctl.getPartitionRowList()[1])
                partition_dict = dict.fromkeys(xrange(np), source)
        self.neoctl.checkReplicas(partition_dict, min_tid, max_tid)
class Application(object):
    """Command-line interpreter of neoctl: resolves a command path in
    action_dict and invokes the matching TerminalNeoCTL method.

    (The original docstring wrongly described this class as "the storage
    node application".)
    """

    def __init__(self, address):
        self.neoctl = TerminalNeoCTL(address)

    def execute(self, args):
        """Execute the command given."""
        # print node type : print list of node of the given type
        # (STORAGE_NODE_TYPE, MASTER_NODE_TYPE...)
        # set node uuid state [1|0] : set the node for the given uuid to the
        # state (RUNNING, DOWN...) and modify the partition if asked
        # set cluster name [shutdown|operational] : either shutdown the
        # cluster or mark it as operational
        # Walk the (possibly nested) action dictionary with the leading
        # arguments until a method name (string) is reached.
        current_action = action_dict
        level = 0
        while current_action is not None and \
              level < len(args) and \
              isinstance(current_action, dict):
            current_action = current_action.get(args[level])
            level += 1
        action = None
        if isinstance(current_action, basestring):
            action = getattr(self.neoctl, current_action, None)
        if action is None:
            return self.usage('unknown command')
        try:
            # The remaining arguments are passed to the action.
            return action(args[level:])
        except NotReadyException, message:
            return 'ERROR: %s' % (message, )

    def _usage(self, action_dict, level=0):
        # Recursively render the command tree, showing the docstring of
        # each leaf action indented according to its depth.
        result = []
        append = result.append
        sub_level = level + 1
        for name, action in action_dict.iteritems():
            append('%s%s' % (' ' * level, name))
            if isinstance(action, dict):
                append(self._usage(action, level=sub_level))
            else:
                real_action = getattr(self.neoctl, action, None)
                if real_action is None:
                    continue
                docstring = getattr(real_action, '__doc__', None)
                if docstring is None:
                    docstring = '(no docstring)'
                docstring_line_list = docstring.split('\n')
                # Strip empty lines at begining & end of line list
                for end in (0, -1):
                    while len(docstring_line_list) \
                          and docstring_line_list[end] == '':
                        docstring_line_list.pop(end)
                # Get the indentation of first line, to preserve other lines
                # relative indentation.
                first_line = docstring_line_list[0]
                base_indentation = len(first_line) - len(first_line.lstrip())
                result.extend([(' ' * sub_level) + x[base_indentation:] \
                    for x in docstring_line_list])
        return '\n'.join(result)

    def usage(self, message):
        output_list = (message, 'Available commands:', self._usage(action_dict),
            "TID arguments can be either integers or timestamps as floats,"
            " e.g. '257684787499560686', '0x3937af2eeeeeeee' or '1325421296.'"
            " for 2012-01-01 12:34:56 UTC")
        return '\n'.join(output_list)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/neoctl/handler.py 0000664 0000000 0000000 00000004364 12601037530 0025137 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib.handler import EventHandler
from neo.lib.protocol import ErrorCodes, Packets
class CommandEventHandler(EventHandler):
    """ Base handler for command """

    def connectionCompleted(self, conn):
        # connected to admin node
        self.app.connected = True
        super(CommandEventHandler, self).connectionCompleted(conn)

    def __disconnected(self):
        # Forget the connection so that NeoCTL reconnects on next request.
        app = self.app
        app.connected = False
        app.connection = None

    def __respond(self, response):
        # Queue the response; NeoCTL.__ask polls until this is non-empty.
        self.app.response_queue.append(response)

    def connectionClosed(self, conn):
        super(CommandEventHandler, self).connectionClosed(conn)
        self.__disconnected()

    def connectionFailed(self, conn):
        super(CommandEventHandler, self).connectionFailed(conn)
        self.__disconnected()

    def ack(self, conn, msg):
        self.__respond((Packets.Error, ErrorCodes.ACK, msg))

    def protocolError(self, conn, msg):
        self.__respond((Packets.Error, ErrorCodes.PROTOCOL_ERROR, msg))

    def notReady(self, conn, msg):
        self.__respond((Packets.Error, ErrorCodes.NOT_READY, msg))

    # Factory building handlers which just queue the answer payload.
    # Defined in the class body: __respond is name-mangled to
    # _CommandEventHandler__respond and resolved on self at call time.
    def __answer(packet_type):
        def answer(self, conn, *args):
            self.__respond((packet_type, ) + args)
        return answer

    answerPartitionList = __answer(Packets.AnswerPartitionList)
    answerNodeList = __answer(Packets.AnswerNodeList)
    answerClusterState = __answer(Packets.AnswerClusterState)
    answerPrimary = __answer(Packets.AnswerPrimary)
    answerLastIDs = __answer(Packets.AnswerLastIDs)
    answerLastTransaction = __answer(Packets.AnswerLastTransaction)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/neoctl/neoctl.py 0000664 0000000 0000000 00000013632 12601037530 0025004 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib.app import BaseApplication
from neo.lib.connection import ClientConnection
from neo.lib.protocol import ClusterStates, NodeStates, ErrorCodes, Packets
from .handler import CommandEventHandler
class NotReadyException(Exception):
    """Raised when the admin node cannot be reached or is not yet ready."""
class NeoCTL(BaseApplication):
connection = None
connected = False
def __init__(self, address):
super(NeoCTL, self).__init__()
self.server = self.nm.createAdmin(address=address)
self.handler = CommandEventHandler(self)
self.response_queue = []
def __getConnection(self):
if not self.connected:
self.connection = ClientConnection(self.em, self.handler,
self.server)
# Never delay reconnection to master. This speeds up unit tests
# and it should not change anything for normal use.
self.connection.setReconnectionNoDelay()
while not self.connected:
self.em.poll(1)
if self.connection is None:
raise NotReadyException('not connected')
return self.connection
def __ask(self, packet):
# TODO: make thread-safe
connection = self.__getConnection()
connection.ask(packet)
response_queue = self.response_queue
assert len(response_queue) == 0
while self.connected:
self.em.poll(1)
if response_queue:
break
else:
raise NotReadyException, 'Connection closed'
response = response_queue.pop()
if response[0] == Packets.Error and \
response[1] == ErrorCodes.NOT_READY:
raise NotReadyException(response[2])
return response
def enableStorageList(self, uuid_list):
"""
Put all given storage nodes in "running" state.
"""
packet = Packets.AddPendingNodes(uuid_list)
response = self.__ask(packet)
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def tweakPartitionTable(self, uuid_list=()):
response = self.__ask(Packets.TweakPartitionTable(uuid_list))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def setClusterState(self, state):
"""
Set cluster state.
"""
packet = Packets.SetClusterState(state)
response = self.__ask(packet)
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def _setNodeState(self, node, state):
"""
Kill node, or remove it permanently
"""
response = self.__ask(Packets.SetNodeState(node, state))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
def getClusterState(self):
"""
Get cluster state.
"""
packet = Packets.AskClusterState()
response = self.__ask(packet)
if response[0] != Packets.AnswerClusterState:
raise RuntimeError(response)
return response[1]
def getLastIds(self):
response = self.__ask(Packets.AskLastIDs())
if response[0] != Packets.AnswerLastIDs:
raise RuntimeError(response)
return response[1:]
def getLastTransaction(self):
response = self.__ask(Packets.AskLastTransaction())
if response[0] != Packets.AnswerLastTransaction:
raise RuntimeError(response)
return response[1]
def getNodeList(self, node_type=None):
"""
Get a list of nodes, filtering with given type.
"""
packet = Packets.AskNodeList(node_type)
response = self.__ask(packet)
if response[0] != Packets.AnswerNodeList:
raise RuntimeError(response)
return response[1] # node_list
def getPartitionRowList(self, min_offset=0, max_offset=0, node=None):
"""
Get a list of partition rows, bounded by min & max and involving
given node.
"""
packet = Packets.AskPartitionList(min_offset, max_offset, node)
response = self.__ask(packet)
if response[0] != Packets.AnswerPartitionList:
raise RuntimeError(response)
return response[1:3] # ptid, row_list
def startCluster(self):
"""
Set cluster into "verifying" state.
"""
return self.setClusterState(ClusterStates.VERIFYING)
def killNode(self, node):
return self._setNodeState(node, NodeStates.UNKNOWN)
def dropNode(self, node):
return self._setNodeState(node, NodeStates.DOWN)
def getPrimary(self):
"""
Return the primary master UUID.
"""
packet = Packets.AskPrimary()
response = self.__ask(packet)
if response[0] != Packets.AnswerPrimary:
raise RuntimeError(response)
return response[1]
def checkReplicas(self, *args):
response = self.__ask(Packets.CheckReplicas(*args))
if response[0] != Packets.Error or response[1] != ErrorCodes.ACK:
raise RuntimeError(response)
return response[2]
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/ 0000775 0000000 0000000 00000000000 12601037530 0023344 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0025443 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/neoadmin.py 0000775 0000000 0000000 00000004304 12601037530 0025514 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# neoadmin - run an administrator node of NEO
#
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from optparse import OptionParser
from neo.lib import logging
from neo.lib.config import ConfigurationManager
# Command-line interface of the admin node.
parser = OptionParser()
parser.add_option('-u', '--uuid', help='specify an UUID to use for this ' \
    'process')
parser.add_option('-f', '--file', help = 'specify a configuration file')
parser.add_option('-s', '--section', help = 'specify a configuration section')
parser.add_option('-l', '--logfile', help = 'specify a logging file')
parser.add_option('-c', '--cluster', help = 'the cluster name')
parser.add_option('-m', '--masters', help = 'master node list')
parser.add_option('-b', '--bind', help = 'the local address to bind to')
parser.add_option('-D', '--dynamic-master-list', help='path of the file '
    'containing dynamic master node list')

# Values used when neither the command line nor the configuration file
# provides one.
defaults = dict(
    bind = '127.0.0.1:9999',
    masters = '127.0.0.1:10000',
)
def main(args=None):
    """Parse the command line and run a NEO admin node."""
    options = parser.parse_args(args=args)[0]
    # Values explicitly given on the command line; the configuration
    # manager overlays them on the file section and the defaults.
    cli_values = {
        'uuid': options.uuid,
        'cluster': options.cluster,
        'masters': options.masters,
        'bind': options.bind,
    }
    config = ConfigurationManager(defaults, options.file,
        options.section or 'admin', cli_values)
    # Configure logging before the application is instantiated.
    logging.setup(options.logfile)
    # Deferred import: pulls in the whole admin package only when needed.
    from neo.admin.app import Application
    Application(config).run()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/neoctl.py 0000775 0000000 0000000 00000002721 12601037530 0025207 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# neoadmin - run an administrator node of NEO
#
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from optparse import OptionParser
from neo.lib import logging
from neo.lib.util import parseNodeAddress
# Command-line interface of the neoctl client.
parser = OptionParser()
parser.add_option('-a', '--address', help = 'specify the address (ip:port) ' \
    'of an admin node', default = '127.0.0.1:9999')
parser.add_option('--handler', help = 'specify the connection handler')
parser.add_option('-l', '--logfile', help = 'specify a logging file')
def main(args=None):
(options, args) = parser.parse_args(args=args)
if options.address is not None:
address = parseNodeAddress(options.address, 9999)
else:
address = ('127.0.0.1', 9999)
logging.setup(options.logfile)
from neo.neoctl.app import Application
print Application(address).execute(args)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/neolog.py 0000775 0000000 0000000 00000020615 12601037530 0025210 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# neolog - read a NEO log
#
# Copyright (C) 2012-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import bz2, gzip, errno, optparse, os, signal, sqlite3, sys, time
from bisect import insort
from logging import getLevelName
# Map compressed-log file extensions to the matching file opener class.
comp_dict = dict(bz2=bz2.BZ2File, gz=gzip.GzipFile)
class Log(object):
_log_id = _packet_id = -1
_protocol_date = None
def __init__(self, db_path, decode_all=False, date_format=None,
filter_from=None):
self._date_format = '%F %T' if date_format is None else date_format
self._decode_all = decode_all
self._filter_from = filter_from
name = os.path.basename(db_path)
try:
name, ext = name.rsplit(os.extsep, 1)
ZipFile = comp_dict[ext]
except (KeyError, ValueError):
# WKRD: Python does not support URI so we can't open in read-only
# mode. See http://bugs.python.org/issue13773
os.stat(db_path) # do not create empty DB if file is missing
self._db = sqlite3.connect(db_path)
else:
import shutil, tempfile
with tempfile.NamedTemporaryFile() as f:
shutil.copyfileobj(ZipFile(db_path), f)
self._db = sqlite3.connect(f.name)
name = name.rsplit(os.extsep, 1)[0]
self._default_name = name
def __iter__(self):
db = self._db
try:
db.execute("BEGIN")
yield
nl = "SELECT * FROM log WHERE id>?"
np = "SELECT * FROM packet WHERE id>?"
date = self._filter_from
if date:
date = " AND date>=%f" % date
nl += date
np += date
nl = db.execute(nl, (self._log_id,))
np = db.execute(np, (self._packet_id,))
try:
p = np.next()
self._reload(p[1])
except StopIteration:
p = None
for self._log_id, date, name, level, pathname, lineno, msg in nl:
while p and p[1] < date:
yield self._packet(*p)
p = np.fetchone()
yield date, name, getLevelName(level), msg.splitlines()
if p:
yield self._packet(*p)
for p in np:
yield self._packet(*p)
finally:
db.rollback()
def _reload(self, date):
q = self._db.execute
date, text = q("SELECT * FROM protocol WHERE date<=?"
" ORDER BY date DESC", (date,)).next()
if self._protocol_date == date:
return
self._protocol_date = date
g = {}
exec bz2.decompress(text) in g
for x in 'uuid_str', 'Packets', 'PacketMalformedError':
setattr(self, x, g[x])
try:
self._next_protocol, = q("SELECT date FROM protocol WHERE date>?",
(date,)).next()
except StopIteration:
self._next_protocol = float('inf')
def _emit(self, date, name, levelname, msg_list):
prefix = self._date_format
if prefix:
d = int(date)
prefix = '%s.%04u ' % (time.strftime(prefix, time.localtime(d)),
int((date - d) * 10000))
prefix += '%-9s %-10s ' % (levelname, name or self._default_name)
for msg in msg_list:
print prefix + msg
def _packet(self, id, date, name, msg_id, code, peer, body):
self._packet_id = id
if self._next_protocol <= date:
self._reload(date)
try:
p = self.Packets[code]
except KeyError:
Packets[code] = p = type('UnknownPacket[%u]' % code, (object,), {})
msg = ['#0x%04x %-30s %s' % (msg_id, p.__name__, peer)]
if body is not None:
logger = getattr(self, p.handler_method_name, None)
if logger or self._decode_all:
p = p()
p._id = msg_id
p._body = body
try:
args = p.decode()
except self.PacketMalformedError:
msg.append("Can't decode packet")
else:
if logger:
msg += logger(*args)
elif args:
msg = '%s \t| %r' % (msg[0], args),
return date, name, 'PACKET', msg
def error(self, code, message):
return "%s (%s)" % (code, message),
def notifyNodeInformation(self, node_list):
node_list.sort(key=lambda x: x[2])
node_list = [(self.uuid_str(uuid), str(node_type),
'%s:%u' % address if address else '?', state)
for node_type, address, uuid, state in node_list]
if node_list:
t = ' ! %%%us | %%%us | %%%us | %%s' % (
max(len(x[0]) for x in node_list),
max(len(x[1]) for x in node_list),
max(len(x[2]) for x in node_list))
return map(t.__mod__, node_list)
return ()
def emit_many(log_list):
    # Merge-emit records from several logs in chronological order.
    # Pending events are stored as (-date, next, emit, event) tuples so
    # that event_list, kept sorted ascending, pops the OLDEST record
    # from its end.
    log_list = [(log, iter(log).next) for log in log_list]
    for x in log_list: # try to start all transactions at the same time
        x[1]()
    event_list = []
    for log, next in log_list:
        try:
            event = next()
        except StopIteration:
            continue
        event_list.append((-event[0], next, log._emit, event))
    if event_list:
        event_list.sort()
        while True:
            key, next, emit, event = event_list.pop()
            try:
                next_date = - event_list[-1][0]
            except IndexError:
                next_date = float('inf')
            try:
                # Keep emitting from this log while it stays the oldest.
                while event[0] <= next_date:
                    emit(*event)
                    event = next()
            except IOError, e:
                # Output pipe closed (e.g. piped into head): exit quietly.
                if e.errno == errno.EPIPE:
                    sys.exit(1)
                raise
            except StopIteration:
                # This log is exhausted; stop when none remain.
                if not event_list:
                    break
            else:
                insort(event_list, (-event[0], next, emit, event))
def main():
    """Entry point: parse options and dump the given NEO log databases."""
    parser = optparse.OptionParser()
    parser.add_option('-a', '--all', action="store_true",
        help='decode all packets')
    parser.add_option('-d', '--date', metavar='FORMAT',
        help='custom date format, according to strftime(3)')
    parser.add_option('-f', '--follow', action="store_true",
        help='output appended data as the file grows')
    parser.add_option('-F', '--flush', action="append", type="int",
        help='with -f, tell process PID to flush logs approximately N'
             ' seconds (see -s)', metavar='PID')
    parser.add_option('-s', '--sleep-interval', type="float", default=1,
        help='with -f, sleep for approximately N seconds (default 1.0)'
             ' between iterations', metavar='N')
    parser.add_option('--from', dest='filter_from', type="float",
        help='show records more recent that timestamp N if N > 0,'
             ' or now+N if N < 0', metavar='N')
    options, args = parser.parse_args()
    if options.sleep_interval <= 0:
        parser.error("sleep_interval must be positive")
    if not args:
        parser.error("no log specified")
    filter_from = options.filter_from
    if filter_from and filter_from < 0:
        # Negative values are relative to now.
        filter_from += time.time()
    log_list = [Log(db_path, options.all, options.date, filter_from)
                for db_path in args]
    if options.follow:
        try:
            pid_list = options.flush or ()
            while True:
                emit_many(log_list)
                # Ask the logging processes to flush their buffers.
                for pid in pid_list:
                    os.kill(pid, signal.SIGRTMIN)
                time.sleep(options.sleep_interval)
        except KeyboardInterrupt:
            pass
    else:
        emit_many(log_list)

if __name__ == "__main__":
    main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/neomaster.py 0000775 0000000 0000000 00000005725 12601037530 0025727 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# neomaster - run a master node of NEO
#
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from optparse import OptionParser
from neo.lib import logging
from neo.lib.config import ConfigurationManager
# Command-line interface of the neomaster script.  Every option is a
# plain string option, so they are kept in one table and registered in
# a single loop; help texts are unchanged.
parser = OptionParser()
for _short, _long, _help in (
        ('-f', '--file', 'specify a configuration file'),
        ('-s', '--section', 'specify a configuration section'),
        ('-u', '--uuid', 'the node UUID (testing purpose)'),
        ('-b', '--bind', 'the local address to bind to'),
        ('-c', '--cluster', 'the cluster name'),
        ('-m', '--masters', 'master node list'),
        ('-r', '--replicas', 'replicas number'),
        ('-p', '--partitions', 'partitions number'),
        ('-l', '--logfile', 'specify a logging file'),
        ('-D', '--dynamic-master-list', 'path of the file '
            'containing dynamic master node list'),
        ('-A', '--autostart',
            'minimum number of pending storage nodes to automatically start'
            ' new cluster (to avoid unwanted recreation of the cluster,'
            ' this should be the total number of storage nodes)'),
        ('-C', '--upstream-cluster', 'the name of cluster to backup'),
        ('-M', '--upstream-masters',
            'list of master nodes in cluster to backup'),
        ):
    parser.add_option(_short, _long, help=_help)

# Fallback values used when neither the command line nor the
# configuration file provides one.
defaults = {
    'bind': '127.0.0.1:10000',
    'masters': '',
    'replicas': 0,
    'partitions': 100,
}
def main(args=None):
    """Entry point of the neomaster script.

    Parses the command line, merges it with the configuration file and
    the built-in defaults, sets up logging, then runs the master
    Application until it exits.
    """
    options, args = parser.parse_args(args=args)
    # Collect the overriding values coming from the command line.
    cli = {key: getattr(options, key) for key in (
        'bind', 'cluster', 'masters', 'replicas', 'partitions',
        'autostart', 'upstream_cluster', 'upstream_masters')}
    cli['uuid'] = options.uuid or None
    config = ConfigurationManager(
        defaults,
        options.file,
        options.section or 'master',
        cli,
    )
    # Logging must be ready before the application starts.
    logging.setup(options.logfile)
    # Import late so that logging is configured first.
    from neo.master.app import Application
    Application(config).run()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/neomigrate.py 0000775 0000000 0000000 00000005304 12601037530 0026055 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# neomigrate - migrate data between a FileStorage and a NEO cluster
#
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import time
import os
# Command-line options of the neomigrate script, registered from a table.
parser = OptionParser()
for _short, _long, _help in (
        ('-l', '--logfile',
            'log debugging information to specified SQLite DB'),
        ('-s', '--source', 'the source database'),
        ('-d', '--destination', 'the destination database'),
        ('-c', '--cluster', 'the NEO cluster name'),
        ):
    parser.add_option(_short, _long, help=_help)
def main(args=None):
    """Entry point of the neomigrate script.

    Copies every transaction from the source database to the destination
    one.  If the source is an existing file path, it is read as a
    FileStorage and pushed into a NEO cluster; otherwise the source is
    taken as a NEO master address and the destination becomes a
    FileStorage file.
    """
    options, args = parser.parse_args(args=args)
    source = options.source or None
    destination = options.destination or None
    cluster = options.cluster or None
    # Both end points and the cluster name are mandatory.
    if source is None or destination is None:
        raise RuntimeError('Source and destination databases must be supplied')
    if cluster is None:
        raise RuntimeError('The NEO cluster name must be supplied')
    # open storages
    from ZODB.FileStorage import FileStorage
    from neo.client.Storage import Storage as NEOStorage
    if os.path.exists(source):
        # FileStorage -> NEO
        print("WARNING: This is not the recommended way to import data to NEO:"
              " you should use Imported backend instead.\n"
              "NEO also does not implement IStorageRestoreable interface,"
              " which means that undo information is not preserved when using"
              " this tool: conflict resolution could happen when undoing an"
              " old transaction.")
        src = FileStorage(file_name=source, read_only=True)
        dst = NEOStorage(master_nodes=destination, name=cluster,
                         logfile=options.logfile)
    else:
        # NEO -> FileStorage
        src = NEOStorage(master_nodes=source, name=cluster,
                         logfile=options.logfile, read_only=True)
        dst = FileStorage(file_name=destination)
    # do the job
    print("Migrating from %s to %s" % (source, destination))
    start = time.time()
    dst.copyTransactionsFrom(src)
    print("Migration done in %3.5f" % (time.time() - start))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/neostorage.py 0000775 0000000 0000000 00000006317 12601037530 0026076 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# neostorage - run a storage node of NEO
#
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from optparse import OptionParser
from neo.lib import logging
from neo.lib.config import ConfigurationManager
# Command-line interface of the neostorage script: an option table
# (option strings + keyword arguments) and one registration loop.
parser = OptionParser()
for _args, _kw in (
        (('-u', '--uuid'), dict(help='specify an UUID to use for this '
            'process. Previously assigned UUID takes precedence (ie '
            'you should always use -R with this switch)')),
        (('-f', '--file'), dict(help='specify a configuration file')),
        (('-s', '--section'), dict(help='specify a configuration section')),
        (('-l', '--logfile'), dict(help='specify a logging file')),
        (('-R', '--reset'), dict(action='store_true',
            help='remove an existing database if any')),
        (('-b', '--bind'), dict(help='the local address to bind to')),
        (('-c', '--cluster'), dict(help='the cluster name')),
        (('-m', '--masters'), dict(help='master node list')),
        (('-a', '--adapter'), dict(help='database adapter to use')),
        (('-d', '--database'), dict(help='database connections string')),
        (('-e', '--engine'), dict(help='database engine')),
        (('-D', '--dynamic-master-list'), dict(help='path of the file '
            'containing dynamic master node list')),
        (('-w', '--wait'), dict(type='float', default=0,
            help='seconds to wait for backend to be '
                 'available, before erroring-out (-1 = infinite)')),
        ):
    parser.add_option(*_args, **_kw)

# Built-in fallback values for the configuration manager.
defaults = {
    'bind': '127.0.0.1',
    'masters': '127.0.0.1:10000',
    'adapter': 'MySQL',
}
def main(args=None):
    """Entry point of the neostorage script.

    Builds the node configuration from the command line, the optional
    configuration file and the built-in defaults, then runs the storage
    Application.  With --reset, the database is only wiped (during
    Application initialization) and the server loop is not entered.
    """
    # TODO: Forbid using "reset" along with any unneeded argument.
    #       "reset" is too dangerous to let user a chance of accidentally
    #       letting it slip through in a long option list.
    #       We should drop support for configuration files to make such a
    #       check useful.
    options, args = parser.parse_args(args=args)
    # Collect the overriding values coming from the command line.
    cli = {key: getattr(options, key) for key in (
        'uuid', 'bind', 'cluster', 'masters', 'database', 'engine',
        'reset', 'adapter', 'wait')}
    config = ConfigurationManager(
        defaults,
        options.file,
        options.section or 'storage',
        cli,
    )
    # Logging must be configured before the application is created.
    logging.setup(options.logfile)
    # Import late so that logging is configured first.
    from neo.storage.app import Application
    app = Application(config)
    # A reset-only invocation stops here.
    if not config.getReset():
        app.run()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/runner.py 0000775 0000000 0000000 00000024320 12601037530 0025233 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import traceback
import unittest
import logging
import time
import sys
import neo
import os
from collections import Counter, defaultdict
from cStringIO import StringIO
from unittest.runner import _WritelnDecorator
from neo.tests import getTempDirectory, __dict__ as neo_tests__dict__
from neo.tests.benchmark import BenchmarkRunner
# list of test modules
# each of them have to import its TestCase classes

# Unit & threaded tests, selected with -u.
UNIT_TEST_MODULES = [
    # generic parts
    'neo.tests.testBootstrap',
    'neo.tests.testConnection',
    'neo.tests.testHandler',
    'neo.tests.testNodes',
    'neo.tests.testDispatcher',
    'neo.tests.testUtil',
    'neo.tests.testPT',
    # master application
    'neo.tests.master.testClientHandler',
    'neo.tests.master.testElectionHandler',
    'neo.tests.master.testMasterApp',
    'neo.tests.master.testMasterPT',
    'neo.tests.master.testRecovery',
    'neo.tests.master.testStorageHandler',
    'neo.tests.master.testVerification',
    'neo.tests.master.testTransactions',
    # storage application
    'neo.tests.storage.testClientHandler',
    'neo.tests.storage.testInitializationHandler',
    'neo.tests.storage.testMasterHandler',
    'neo.tests.storage.testStorageApp',
    # the database backend under test is chosen via $NEO_TESTS_ADAPTER
    'neo.tests.storage.testStorage' + os.getenv('NEO_TESTS_ADAPTER', 'SQLite'),
    'neo.tests.storage.testVerificationHandler',
    'neo.tests.storage.testIdentificationHandler',
    'neo.tests.storage.testTransactions',
    # client application
    'neo.tests.client.testClientApp',
    'neo.tests.client.testMasterHandler',
    'neo.tests.client.testStorageHandler',
    'neo.tests.client.testConnectionPool',
    # light functional tests
    'neo.tests.threaded.test',
    'neo.tests.threaded.testImporter',
    'neo.tests.threaded.testReplication',
]

# Functional tests, selected with -f.
FUNC_TEST_MODULES = [
    'neo.tests.functional.testMaster',
    'neo.tests.functional.testClient',
    'neo.tests.functional.testCluster',
    'neo.tests.functional.testStorage',
]

# ZODB test suite running on NEO, selected with -z.  Items are
# (module name, test method prefix) tuples because these ZODB modules
# name their test methods 'check*' instead of 'test*' (see
# NeoTestRunner.run, which feeds the prefix to the loader).
ZODB_TEST_MODULES = [
    ('neo.tests.zodb.testBasic', 'check'),
    ('neo.tests.zodb.testConflict', 'check'),
    ('neo.tests.zodb.testHistory', 'check'),
    ('neo.tests.zodb.testIterator', 'check'),
    ('neo.tests.zodb.testMT', 'check'),
    ('neo.tests.zodb.testPack', 'check'),
    ('neo.tests.zodb.testPersistent', 'check'),
    ('neo.tests.zodb.testReadOnly', 'check'),
    ('neo.tests.zodb.testRevision', 'check'),
    #('neo.tests.zodb.testRecovery', 'check'),
    ('neo.tests.zodb.testSynchronization', 'check'),
    # ('neo.tests.zodb.testVersion', 'check'),
    ('neo.tests.zodb.testUndo', 'check'),
    ('neo.tests.zodb.testZODB', 'check'),
]
class NeoTestRunner(unittest.TextTestResult):
""" Custom result class to build report with statistics per module """
def __init__(self, title, verbosity):
super(NeoTestRunner, self).__init__(
_WritelnDecorator(sys.stderr), False, verbosity)
self._title = title
self.modulesStats = {}
self.failedImports = {}
self.run_dict = defaultdict(int)
self.time_dict = defaultdict(int)
self.temp_directory = getTempDirectory()
def wasSuccessful(self):
return not (self.failures or self.errors or self.unexpectedSuccesses)
def run(self, name, modules):
print '\n', name
suite = unittest.TestSuite()
loader = unittest.defaultTestLoader
for test_module in modules:
# load prefix if supplied
if isinstance(test_module, tuple):
test_module, prefix = test_module
loader.testMethodPrefix = prefix
else:
loader.testMethodPrefix = 'test'
try:
test_module = __import__(test_module, globals(), locals(), ['*'])
except ImportError, err:
self.failedImports[test_module] = err
print "Import of %s failed : %s" % (test_module, err)
traceback.print_exc()
continue
suite.addTests(loader.loadTestsFromModule(test_module))
suite.run(self)
def startTest(self, test):
super(NeoTestRunner, self).startTest(test)
self.run_dict[test.__class__.__module__] += 1
self.start_time = time.time()
def stopTest(self, test):
self.time_dict[test.__class__.__module__] += \
time.time() - self.start_time
super(NeoTestRunner, self).stopTest(test)
def _buildSummary(self, add_status):
unexpected_count = len(self.errors) + len(self.failures) \
+ len(self.unexpectedSuccesses)
expected_count = len(self.expectedFailures)
success = self.testsRun - unexpected_count - expected_count
add_status('Directory', self.temp_directory)
if self.testsRun:
add_status('Status', '%.3f%%' % (success * 100.0 / self.testsRun))
for var in os.environ:
if var.startswith('NEO_TEST'):
add_status(var, os.environ[var])
# visual
header = "%25s | run | unexpected | expected | skipped | time \n" % 'Test Module'
separator = "%25s-+-------+------------+----------+---------+----------\n" % ('-' * 25)
format = "%25s | %3s | %3s | %3s | %3s | %6.2fs \n"
group_f = "%25s | | | | | \n"
# header
s = ' ' * 30 + ' NEO TESTS REPORT\n\n' + header + separator
group = None
unexpected = Counter(x[0].__class__.__module__
for x in (self.errors, self.failures)
for x in x)
unexpected.update(x.__class__.__module__
for x in self.unexpectedSuccesses)
expected = Counter(x[0].__class__.__module__
for x in self.expectedFailures)
skipped = Counter(x[0].__class__.__module__
for x in self.skipped)
total_time = 0
# for each test case
for k, v in sorted(self.run_dict.iteritems()):
# display group below its content
_group, name = k.rsplit('.', 1)
if _group != group:
if group:
s += separator + group_f % group + separator
group = _group
t = self.time_dict[k]
total_time += t
s += format % (name.lstrip('test'), v, unexpected.get(k, '.'),
expected.get(k, '.'), skipped.get(k, '.'), t)
# the last group
s += separator + group_f % group + separator
# the final summary
s += format % ("Summary", self.testsRun, unexpected_count or '.',
expected_count or '.', len(self.skipped) or '.',
total_time) + separator + '\n'
return "%s Tests, %s Failed" % (self.testsRun, unexpected_count), s
def buildReport(self, add_status):
subject, summary = self._buildSummary(add_status)
body = StringIO()
body.write(summary)
for test in self.unexpectedSuccesses:
body.write("UNEXPECTED SUCCESS: %s\n" % self.getDescription(test))
self.stream = _WritelnDecorator(body)
self.printErrors()
return subject, body.getvalue()
class TestRunner(BenchmarkRunner):
    """Select, run and report the NEO test suites."""

    def add_options(self, parser):
        # Suite-selection switches; all are simple boolean flags.
        for short_opt, long_opt, help_text in (
                ('-f', '--functional', 'Functional tests'),
                ('-u', '--unit', 'Unit & threaded tests'),
                ('-z', '--zodb', 'ZODB test suite running on a NEO'),
                ('-v', '--verbose', 'Verbose output'),
                ):
            parser.add_option(short_opt, long_opt, action='store_true',
                              help=help_text)
        parser.format_epilog = lambda _: """
Environment Variables:
  NEO_TESTS_ADAPTER          Default is SQLite for threaded clusters,
                             MySQL otherwise.
  MySQL specific:
    NEO_DB_PREFIX            default: %(DB_PREFIX)s
    NEO_DB_ADMIN             default: %(DB_ADMIN)s
    NEO_DB_PASSWD            default: %(DB_PASSWD)s
    NEO_DB_USER              default: %(DB_USER)s
  ZODB tests:
    NEO_TEST_ZODB_FUNCTIONAL Clusters are threaded by default. If true,
                             they are built like in functional tests.
    NEO_TEST_ZODB_MASTERS    default: 1
    NEO_TEST_ZODB_PARTITIONS default: 1
    NEO_TEST_ZODB_REPLICAS   default: 0
    NEO_TEST_ZODB_STORAGES   default: 1
""" % neo_tests__dict__

    def load_options(self, options, args):
        # At least one suite (or an explicit module argument) is required.
        if not (options.unit or options.functional or options.zodb or args):
            sys.exit('Nothing to run, please give one of -f, -u, -z')
        return {
            'unit': options.unit,
            'functional': options.functional,
            'zodb': options.zodb,
            'verbosity': 2 if options.verbose else 1,
        }

    def start(self):
        """Run the selected suites and return the (subject, body) report."""
        config = self._config
        runner = NeoTestRunner(config.title or 'Neo', config.verbosity)
        try:
            for enabled, caption, modules in (
                    (config.unit, 'Unit tests', UNIT_TEST_MODULES),
                    (config.functional, 'Functional tests', FUNC_TEST_MODULES),
                    (config.zodb, 'ZODB tests', ZODB_TEST_MODULES),
                    ):
                if enabled:
                    runner.run(caption, modules)
        except KeyboardInterrupt:
            # do not mail a report for an interrupted run
            config['mail_to'] = None
            traceback.print_exc()
        # build report
        self._successful = runner.wasSuccessful()
        return runner.buildReport(self.add_status)
def main(args=None):
    """Command-line entry point: run the requested suites and exit with
    a non-zero status when any test failed."""
    runner = TestRunner()
    runner.run()
    # sys.exit raises SystemExit, so nothing runs after this line.
    sys.exit(not runner.was_successful())

if __name__ == "__main__":
    main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/scripts/simple.py 0000664 0000000 0000000 00000004304 12601037530 0025210 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
#
# Copyright (C) 2011-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import inspect, random, signal, sys
from logging import getLogger, INFO
from optparse import OptionParser
from neo.lib import logging
from neo.tests import functional
# Presumably buffers log records in memory until logging is fully set
# up (neo.lib.logging API) — TODO confirm against neo.lib.logging.
logging.backlog()
# NOTE(review): removing the bound 'handle' attribute appears to disable
# the default root handler; verify the intent in neo.lib.logging.
del logging.default_root_handler.handle
def main():
    """Set up and run a throw-away NEO cluster for testing purpose.

    One '--<name>' command-line option is generated for every keyword
    argument of functional.NEOCluster.__init__, so the cluster can be
    fully configured from the command line.  Runs until interrupted,
    then stops the cluster.
    """
    spec_args, _, _, spec_defaults = inspect.getargspec(
        functional.NEOCluster.__init__)
    # (name, default) pairs for the keyword arguments of NEOCluster
    option_list = zip(spec_args[-len(spec_defaults):], spec_defaults)
    parser = OptionParser(usage="%prog [options] [db...]",
        description="Quickly setup a simple NEO cluster for testing purpose.")
    parser.add_option('--seed', help="settings like node ports/uuids and"
        " cluster name are random: pass any string to initialize the RNG")
    defaults = {}
    for name, default in sorted(option_list):
        kw = {}
        if type(default) is bool:
            # boolean keywords become flags, off by default
            kw['action'] = "store_true"
            defaults[name] = False
        elif default is not None:
            defaults[name] = default
            if isinstance(default, int):
                kw['type'] = "int"
        parser.add_option('--' + name, **kw)
    parser.set_defaults(**defaults)
    options, args = parser.parse_args()
    if options.seed:
        # deterministic ports/uuids/cluster name for reproducible runs
        functional.random = random.Random(options.seed)
    getLogger().setLevel(INFO)
    cluster = functional.NEOCluster(args, **{name: getattr(options, name)
                                             for name, _ in option_list})
    try:
        cluster.run()
        logging.info("Cluster running ...")
        cluster.waitAll()
    finally:
        cluster.stop()

if __name__ == "__main__":
    main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/ 0000775 0000000 0000000 00000000000 12601037530 0023321 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0025420 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/app.py 0000664 0000000 0000000 00000034223 12601037530 0024457 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import sys
from collections import deque
from neo.lib import logging
from neo.lib.app import BaseApplication
from neo.lib.protocol import uuid_str, \
CellStates, ClusterStates, NodeTypes, Packets
from neo.lib.node import NodeManager
from neo.lib.connection import ListeningConnection
from neo.lib.exception import OperationFailure, PrimaryFailure
from neo.lib.pt import PartitionTable
from neo.lib.util import dump
from neo.lib.bootstrap import BootstrapManager
from .checker import Checker
from .database import buildDatabaseManager
from .exception import AlreadyPendingError
from .handlers import identification, verification, initialization
from .handlers import master, hidden
from .replicator import Replicator
from .transactions import TransactionManager
from neo.lib.debug import register as registerLiveDebugger
class Application(BaseApplication):
"""The storage node application."""
def __init__(self, config):
super(Application, self).__init__(config.getDynamicMasterList())
# set the cluster name
self.name = config.getCluster()
self.tm = TransactionManager(self)
self.dm = buildDatabaseManager(config.getAdapter(),
(config.getDatabase(), config.getEngine(), config.getWait()),
)
# load master nodes
for master_address in config.getMasters():
self.nm.createMaster(address=master_address)
# set the bind address
self.server = config.getBind()
logging.debug('IP address is %s, port is %d', *self.server)
# The partition table is initialized after getting the number of
# partitions.
self.pt = None
self.checker = Checker(self)
self.replicator = Replicator(self)
self.listening_conn = None
self.master_conn = None
self.master_node = None
# operation related data
self.event_queue = None
self.event_queue_dict = None
self.operational = False
# ready is True when operational and got all informations
self.ready = False
self.dm.setup(reset=config.getReset())
self.loadConfiguration()
# force node uuid from command line argument, for testing purpose only
if config.getUUID() is not None:
self.uuid = config.getUUID()
registerLiveDebugger(on_log=self.log)
def close(self):
self.listening_conn = None
self.dm.close()
super(Application, self).close()
def _poll(self):
self.em.poll(1)
def log(self):
self.em.log()
self.logQueuedEvents()
self.nm.log()
self.tm.log()
if self.pt is not None:
self.pt.log()
def loadConfiguration(self):
"""Load persistent configuration data from the database.
If data is not present, generate it."""
dm = self.dm
# check cluster name
name = dm.getName()
if name is None:
dm.setName(self.name)
elif name != self.name:
raise RuntimeError('name %r does not match with the database: %r'
% (self.name, name))
# load configuration
self.uuid = dm.getUUID()
num_partitions = dm.getNumPartitions()
num_replicas = dm.getNumReplicas()
ptid = dm.getPTID()
# check partition table configuration
if num_partitions is not None and num_replicas is not None:
if num_partitions <= 0:
raise RuntimeError, 'partitions must be more than zero'
# create a partition table
self.pt = PartitionTable(num_partitions, num_replicas)
logging.info('Configuration loaded:')
logging.info('UUID : %s', uuid_str(self.uuid))
logging.info('PTID : %s', dump(ptid))
logging.info('Name : %s', self.name)
logging.info('Partitions: %s', num_partitions)
logging.info('Replicas : %s', num_replicas)
def loadPartitionTable(self):
"""Load a partition table from the database."""
ptid = self.dm.getPTID()
cell_list = self.dm.getPartitionTable()
new_cell_list = []
for offset, uuid, state in cell_list:
# convert from int to Enum
state = CellStates[state]
# register unknown nodes
if self.nm.getByUUID(uuid) is None:
self.nm.createStorage(uuid=uuid)
new_cell_list.append((offset, uuid, state))
# load the partition table in manager
self.pt.clear()
self.pt.update(ptid, new_cell_list, self.nm)
def run(self):
try:
self._run()
except Exception:
logging.exception('Pre-mortem data:')
self.log()
logging.flush()
raise
def _run(self):
"""Make sure that the status is sane and start a loop."""
if len(self.name) == 0:
raise RuntimeError, 'cluster name must be non-empty'
# Make a listening port
handler = identification.IdentificationHandler(self)
self.listening_conn = ListeningConnection(self.em, handler, self.server)
self.server = self.listening_conn.getAddress()
# Connect to a primary master node, verify data, and
# start the operation. This cycle will be executed permanently,
# until the user explicitly requests a shutdown.
while True:
self.cluster_state = None
self.ready = False
self.operational = False
if self.master_node is None:
# look for the primary master
self.connectToPrimary()
# check my state
node = self.nm.getByUUID(self.uuid)
if node is not None and node.isHidden():
self.wait()
# drop any client node
for conn in self.em.getConnectionList():
if conn not in (self.listening_conn, self.master_conn):
conn.close()
# create/clear event queue
self.event_queue = deque()
self.event_queue_dict = {}
try:
self.verifyData()
self.initialize()
self.doOperation()
raise RuntimeError, 'should not reach here'
except OperationFailure, msg:
logging.error('operation stopped: %s', msg)
if self.cluster_state == ClusterStates.STOPPING_BACKUP:
self.dm.setBackupTID(None)
except PrimaryFailure, msg:
logging.error('primary master is down: %s', msg)
finally:
self.checker = Checker(self)
def connectToPrimary(self):
"""Find a primary master node, and connect to it.
If a primary master node is not elected or ready, repeat
the attempt of a connection periodically.
Note that I do not accept any connection from non-master nodes
at this stage."""
pt = self.pt
# First of all, make sure that I have no connection.
for conn in self.em.getConnectionList():
if not conn.isListening():
conn.close()
# search, find, connect and identify to the primary master
bootstrap = BootstrapManager(self, self.name,
NodeTypes.STORAGE, self.uuid, self.server)
data = bootstrap.getPrimaryConnection()
(node, conn, uuid, num_partitions, num_replicas) = data
self.master_node = node
self.master_conn = conn
logging.info('I am %s', uuid_str(uuid))
self.uuid = uuid
self.dm.setUUID(uuid)
# Reload a partition table from the database. This is necessary
# when a previous primary master died while sending a partition
# table, because the table might be incomplete.
if pt is not None:
self.loadPartitionTable()
if num_partitions != pt.getPartitions():
raise RuntimeError('the number of partitions is inconsistent')
if pt is None or pt.getReplicas() != num_replicas:
# changing number of replicas is not an issue
self.dm.setNumPartitions(num_partitions)
self.dm.setNumReplicas(num_replicas)
self.pt = PartitionTable(num_partitions, num_replicas)
self.loadPartitionTable()
def verifyData(self):
"""Verify data under the control by a primary master node.
Connections from client nodes may not be accepted at this stage."""
logging.info('verifying data')
handler = verification.VerificationHandler(self)
self.master_conn.setHandler(handler)
_poll = self._poll
while not self.operational:
_poll()
def initialize(self):
""" Retreive partition table and node informations from the primary """
logging.debug('initializing...')
_poll = self._poll
handler = initialization.InitializationHandler(self)
self.master_conn.setHandler(handler)
# ask node list and partition table
self.pt.clear()
self.master_conn.ask(Packets.AskNodeInformation())
self.master_conn.ask(Packets.AskPartitionTable())
while self.master_conn.isPending():
_poll()
self.ready = True
self.replicator.populate()
self.master_conn.notify(Packets.NotifyReady())
def doOperation(self):
"""Handle everything, including replications and transactions."""
logging.info('doing operation')
poll = self._poll
_poll = self.em._poll
isIdle = self.em.isIdle
handler = master.MasterOperationHandler(self)
self.master_conn.setHandler(handler)
# Forget all unfinished data.
self.dm.dropUnfinishedData()
self.tm.reset()
self.task_queue = task_queue = deque()
try:
self.dm.doOperation(self)
while True:
while task_queue:
try:
while isIdle():
if task_queue[-1].next():
_poll(0)
task_queue.rotate()
break
except StopIteration:
task_queue.pop()
poll()
finally:
del self.task_queue
# XXX: Although no handled exception should happen between
# replicator.populate() and the beginning of this 'try'
# clause, the replicator should be reset in a safer place.
self.replicator = Replicator(self)
# Abort any replication, whether we are feeding or out-of-date.
for node in self.nm.getStorageList(only_identified=True):
node.getConnection().close()
def changeClusterState(self, state):
self.cluster_state = state
if state == ClusterStates.STOPPING_BACKUP:
self.replicator.stop()
def wait(self):
# change handler
logging.info("waiting in hidden state")
_poll = self._poll
handler = hidden.HiddenHandler(self)
for conn in self.em.getConnectionList():
conn.setHandler(handler)
node = self.nm.getByUUID(self.uuid)
while True:
_poll()
if not node.isHidden():
break
def queueEvent(self, some_callable, conn=None, args=(), key=None,
raise_on_duplicate=True):
event_queue_dict = self.event_queue_dict
n = event_queue_dict.get(key)
if n and raise_on_duplicate:
raise AlreadyPendingError()
msg_id = None if conn is None else conn.getPeerId()
self.event_queue.append((key, some_callable, msg_id, conn, args))
if key is not None:
event_queue_dict[key] = n + 1 if n else 1
def executeQueuedEvents(self):
p = self.event_queue.popleft
event_queue_dict = self.event_queue_dict
for _ in xrange(len(self.event_queue)):
key, some_callable, msg_id, conn, args = p()
if key is not None:
n = event_queue_dict[key] - 1
if n:
event_queue_dict[key] = n
else:
del event_queue_dict[key]
if conn is None:
some_callable(*args)
elif not conn.isClosed():
orig_msg_id = conn.getPeerId()
try:
conn.setPeerId(msg_id)
some_callable(conn, *args)
finally:
conn.setPeerId(orig_msg_id)
def logQueuedEvents(self):
if self.event_queue is None:
return
logging.info("Pending events:")
for key, event, _msg_id, _conn, args in self.event_queue:
logging.info(' %r:%r: %r:%r %r %r', key, event.__name__,
_msg_id, _conn, args)
def newTask(self, iterator):
try:
iterator.next()
except StopIteration:
return
self.task_queue.appendleft(iterator)
def closeClient(self, connection):
if connection is not self.replicator.getCurrentConnection() and \
connection not in self.checker.conn_dict:
connection.closeClient()
def shutdown(self, erase=False):
"""Close all connections and exit"""
for c in self.em.getConnectionList():
try:
c.close()
except PrimaryFailure:
pass
# clear database to avoid polluting the cluster at restart
if erase:
self.dm.erase()
logging.info("Application has been asked to shut down")
sys.exit()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/checker.py 0000664 0000000 0000000 00000021037 12601037530 0025302 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2012-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from collections import deque
from neo.lib import logging
from neo.lib.connection import ClientConnection
from neo.lib.connector import ConnectorConnectionClosedException
from neo.lib.protocol import NodeTypes, Packets, ZERO_OID
from neo.lib.util import add64, dump
from .handlers.storage import StorageOperationHandler
CHECK_COUNT = 4000
class Checker(object):
    """Check that the content of a partition is identical on every
    storage node replicating it, reporting diverging (corrupted) cells
    to the master.

    Partitions to check are queued via __call__ and processed one at a
    time.  While a partition is being checked, self.conn_dict maps each
    peer connection (the None key standing for the local database) to
    the state of its pending request: identification flag, request id
    returned by ask(), or the received answer tuple.
    """

    def __init__(self, app):
        self.app = app
        # Pending (partition, source, min_tid, max_tid) check requests.
        self.queue = deque()
        # Connection -> per-request state for the current partition.
        self.conn_dict = {}

    def __call__(self, partition, source, min_tid, max_tid):
        """Queue a partition check; start at once if no check is running."""
        self.queue.append((partition, source, min_tid, max_tid))
        if not self.conn_dict:
            self._nextPartition()

    def _nextPartition(self):
        """Pop queued requests until one partition can actually be
        checked, then send the first AskCheckTIDRange round."""
        app = self.app

        def connect(node, uuid=app.uuid, name=app.name):
            # Reuse or open a client connection to 'node' and record it
            # in conn_dict along with its identification state.
            if node.getUUID() == app.uuid:
                return
            if node.isConnected(connecting=True):
                conn = node.getConnection()
                conn.asClient()
            else:
                conn = ClientConnection(app.em, StorageOperationHandler(app),
                                        node)
                conn.ask(Packets.RequestIdentification(
                    NodeTypes.STORAGE, uuid, app.server, name))
            self.conn_dict[conn] = node.isIdentified()

        # Connections left over from the previous check; any that is not
        # reused below is closed in the outer 'finally'.
        conn_set = set(self.conn_dict)
        conn_set.discard(None)
        try:
            self.conn_dict.clear()
            while True:
                try:
                    partition, (name, source), min_tid, max_tid = \
                        self.queue.popleft()
                except IndexError:
                    return
                cell = app.pt.getCell(partition, app.uuid)
                if cell is None or cell.isOutOfDate():
                    msg = "discarded or out-of-date"
                else:
                    try:
                        for cell in app.pt.getCellList(partition):
                            # XXX: Ignore corrupted cells for the moment
                            #      because we're still unable to fix them
                            #      (see also AdministrationHandler of master)
                            if cell.isReadable(): #if not cell.isOutOfDate():
                                connect(cell.getNode())
                        if source:
                            node = app.nm.getByAddress(source)
                            if name:
                                source = app.nm.createStorage(address=source) \
                                         if node is None else node
                                connect(source, None, name)
                            elif (node.getUUID() == app.uuid or
                                  node.isConnected(connecting=True) and
                                  node.getConnection() in self.conn_dict):
                                source = node
                            else:
                                msg = "unavailable source"
                        if self.conn_dict:
                            break
                        msg = "no replica"
                    except ConnectorConnectionClosedException:
                        msg = "connection closed"
                    finally:
                        conn_set.update(self.conn_dict)
                    self.conn_dict.clear()
                logging.error("Failed to start checking partition %u (%s)",
                              partition, msg)
            # Do not close the connections we are about to use.
            conn_set.difference_update(self.conn_dict)
        finally:
            for conn in conn_set:
                app.closeClient(conn)
        logging.debug("start checking partition %u from %s to %s",
                      partition, dump(min_tid), dump(max_tid))
        self.min_tid = self.next_tid = min_tid
        self.max_tid = max_tid
        self.next_oid = None
        self.partition = partition
        self.source = source

        def start():
            # Delay the first request while the upper bound tid is still
            # locked by an unfinished transaction.
            if app.tm.isLockedTid(max_tid):
                app.queueEvent(start)
                return
            args = partition, CHECK_COUNT, min_tid, max_tid
            p = Packets.AskCheckTIDRange(*args)
            for conn, identified in self.conn_dict.items():
                self.conn_dict[conn] = conn.ask(p) if identified else None
            self.conn_dict[None] = app.dm.checkTIDRange(*args)
        start()

    def connected(self, node):
        """A peer we were waiting for got identified: send it the
        pending range request."""
        conn = node.getConnection()
        if self.conn_dict.get(conn, self) is None:
            self.conn_dict[conn] = conn.ask(Packets.AskCheckTIDRange(
                self.partition, CHECK_COUNT, self.next_tid, self.max_tid))

    def connectionLost(self, conn):
        """Forget a lost connection; abort the current check when the
        reference source is gone or too few peers remain."""
        try:
            del self.conn_dict[conn]
        except KeyError:
            return
        if self.source is not None and self.source.getConnection() is conn:
            del self.source
        elif len(self.conn_dict) > 1:
            logging.warning("node lost but keep up checking partition %u",
                            self.partition)
            return
        logging.warning("check of partition %u aborted", self.partition)
        self._nextPartition()

    def _nextRange(self):
        """Ask every remaining peer (and the local db, None key) the
        checksums for the next range: serials once next_oid is set,
        transactions otherwise."""
        if self.next_oid:
            args = self.partition, CHECK_COUNT, self.next_tid, self.max_tid, \
                self.next_oid
            p = Packets.AskCheckSerialRange(*args)
            check = self.app.dm.checkSerialRange
        else:
            args = self.partition, CHECK_COUNT, self.next_tid, self.max_tid
            p = Packets.AskCheckTIDRange(*args)
            check = self.app.dm.checkTIDRange
        for conn in self.conn_dict.keys():
            self.conn_dict[conn] = check(*args) if conn is None else conn.ask(p)

    def checkRange(self, conn, *args):
        """Handle an AnswerCheck*Range: when all peers have answered,
        compare checksums, notify the master about corrupted cells on
        divergence, then move to the next range or partition."""
        if self.conn_dict.get(conn, self) != conn.getPeerId():
            # Ignore answers to old requests,
            # because we did nothing to cancel them.
            logging.info("ignored AnswerCheck*Range%r", args)
            return
        self.conn_dict[conn] = args
        answer_set = set(self.conn_dict.itervalues())
        if len(answer_set) > 1:
            for answer in answer_set:
                if type(answer) is not tuple:
                    # Some peers have not answered yet.
                    return
            # TODO: Automatically tell corrupted cells to fix their data
            #       because we're still unable to fix them
            #       For the moment, tell master to put them in CORRUPTED state
            #       and keep up checking if useful.
            uuid = self.app.uuid
            # Reference answer: the one of the check source, if any.
            args = None if self.source is None else self.conn_dict[
                None if self.source.getUUID() == uuid
                     else self.source.getConnection()]
            uuid_list = []
            for conn, answer in self.conn_dict.items():
                if answer != args:
                    del self.conn_dict[conn]
                    if conn is None:
                        uuid_list.append(uuid)
                    else:
                        uuid_list.append(conn.getUUID())
                        self.app.closeClient(conn)
            p = Packets.NotifyPartitionCorrupted(self.partition, uuid_list)
            self.app.master_conn.notify(p)
            if len(self.conn_dict) <= 1:
                logging.warning("check of partition %u aborted",
                                self.partition)
                self.queue.clear()
                self._nextPartition()
                return
        try:
            count, _, max_tid = args
        except ValueError:
            # 5-tuple answer: AnswerCheckSerialRange.
            count, _, self.next_tid, _, max_oid = args
            if count < CHECK_COUNT:
                logging.debug("partition %u checked from %s to %s",
                    self.partition, dump(self.min_tid), dump(self.max_tid))
                self._nextPartition()
                return
            self.next_oid = add64(max_oid, 1)
        else:
            # NOTE(review): after the corruption branch above, answer_set
            # may still hold several answers, which would make this
            # single-element unpacking raise — confirm reachability.
            (count, _, max_tid), = answer_set
            if count < CHECK_COUNT:
                # All tids checked: restart at min_tid, checking serials.
                self.next_tid = self.min_tid
                self.next_oid = ZERO_OID
            else:
                self.next_tid = add64(max_tid, 1)
        self._nextRange()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/database/ 0000775 0000000 0000000 00000000000 12601037530 0025065 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/database/__init__.py 0000664 0000000 0000000 00000002526 12601037530 0027203 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# Debug switch; presumably makes backends log every query when True —
# confirm at use sites (not visible in this chunk).
LOG_QUERIES = False
from neo.lib.exception import DatabaseFailure
from .manager import DatabaseManager
# Maps the 'adapter' configuration value to the 'module.class' path of
# the backend implementation, relative to this package
# (see getAdapterKlass below).
DATABASE_MANAGER_DICT = {
    'Importer': 'importer.ImporterDatabaseManager',
    'MySQL': 'mysqldb.MySQLDatabaseManager',
    'SQLite': 'sqlite.SQLiteDatabaseManager',
}
def getAdapterKlass(name):
    """Return the database manager class registered under `name`.

    An empty/None name selects the MySQL backend.  DatabaseFailure is
    raised for an unknown adapter name.
    """
    try:
        path = DATABASE_MANAGER_DICT[name or 'MySQL']
    except KeyError:
        raise DatabaseFailure('Cannot find a database adapter <%s>' % name)
    module_name, name = path.split('.')
    # Relative import of the backend module within this package.
    package = __import__(__name__, fromlist=[module_name], level=1)
    return getattr(getattr(package, module_name), name)
def buildDatabaseManager(name, args=(), kw=None):
    """Instantiate the database manager registered under `name`.

    args/kw are forwarded to the manager's constructor.  `kw` defaults
    to None instead of a mutable {} so that no dict instance can ever be
    shared between calls (it was only read here, but the mutable default
    is a classic trap for future modifications).
    """
    return getAdapterKlass(name)(*args, **(kw or {}))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/database/importer.py 0000664 0000000 0000000 00000046353 12601037530 0027313 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2014-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import cPickle, pickle, time
from bisect import bisect, insort
from collections import deque
from cStringIO import StringIO
from ConfigParser import SafeConfigParser
from ZODB.config import storageFromString
from ZODB.POSException import POSKeyError
from . import buildDatabaseManager, DatabaseManager
from neo.lib import logging, patch, util
from neo.lib.exception import DatabaseFailure
from neo.lib.protocol import CellStates, ZERO_OID, ZERO_TID, ZERO_HASH, MAX_TID
patch.speedupFileStorageTxnLookup()
class Reference(object):
    """Tiny wrapper marking a value as a persistent reference while it
    travels through Repickler (see below)."""

    __slots__ = ("value",)

    def __init__(self, value):
        self.value = value
class Repickler(pickle.Unpickler):
    """Rewrite persistent references inside a pickle without really
    loading it.

    A pure-Python unpickler parses the original stream while a cPickle
    pickler rebuilds an equivalent one on the fly; object state is never
    instantiated (see the _noload machinery).  When `persistent_map`
    leaves every reference unchanged, the original bytes are returned
    untouched.
    """

    def __init__(self, persistent_map):
        # persistent_map: callable mapping an external reference to its
        # replacement; returning the same object means "unchanged".
        self._f = StringIO()
        # Use python implementation for unpickling because loading can not
        # be customized enough with cPickle.
        pickle.Unpickler.__init__(self, self._f)
        # For pickling, it is possible to use the fastest implementation,
        # which also generates fewer useless PUT opcodes.
        self._p = cPickle.Pickler(self._f, 1)
        self.memo = self._p.memo # just a tiny optimization

        def persistent_id(obj):
            if isinstance(obj, Reference):
                r = obj.value
                del obj.value # minimize refcnt like for deque+popleft
                return r
        self._p.inst_persistent_id = persistent_id

        def persistent_load(obj):
            new_obj = persistent_map(obj)
            if new_obj is not obj:
                self._changed = True
            return Reference(new_obj)
        self.persistent_load = persistent_load

    def _save(self, data):
        # Dump the next queued item and rewind over the trailing STOP
        # opcode so the stream can be continued.
        self._p.dump(data.popleft())
        # remove STOP (no need to truncate since it will always be overridden)
        self._f.seek(-1, 1)

    def __call__(self, data):
        """Repickle `data`; return it unchanged when no reference had
        to be mapped."""
        f = self._f
        f.truncate(0)
        f.write(data)
        f.reset()
        self._changed = False
        try:
            classmeta = self.load()
            state = self.load()
        finally:
            self.memo.clear()
        if self._changed:
            f.truncate(0)
            try:
                self._p.dump(classmeta).dump(state)
            finally:
                self.memo.clear()
            return f.getvalue()
        return data

    # Private copy of the opcode dispatch table, patched below.
    dispatch = pickle.Unpickler.dispatch.copy()

    class _noload(object):
        """Placeholder standing for an object that is parsed but never
        really built; it records constructor args, list/dict items and
        state so it can be re-dumped verbatim by __reduce__."""

        state = None

        def __new__(cls, dump):
            def load(*args):
                self = object.__new__(cls)
                self.dump = dump
                # We use deque+popleft everywhere to minimize the number of
                # references at the moment cPickle considers memoizing an
                # object. This reduces the number of useless PUT opcodes and
                # usually produces smaller pickles than ZODB. Without this,
                # they would, on the contrary, increase in size.
                # We could also use optimize from pickletools module.
                self.args = deque(args)
                self._list = deque()
                self.append = self._list.append
                self.extend = self._list.extend
                self._dict = deque()
                return self
            return load

        def __setitem__(self, *args):
            self._dict.append(args)

        def dict(self):
            while self._dict:
                yield self._dict.popleft()

        def list(self, pos):
            # Re-emit the opcodes that build this object, preserving any
            # memo opcodes written after position 'pos'.
            pt = self.args.popleft()
            f = pt._f
            f.seek(pos + 3) # NONE + EMPTY_TUPLE + REDUCE
            put = f.read() # preserve memo if any
            f.truncate(pos)
            f.write(self.dump(pt, self.args) + put)
            while self._list:
                yield self._list.popleft()

        def __reduce__(self):
            return None, (), self.state, \
                self.list(self.args[0]._f.tell()), self.dict()

    @_noload
    def _obj(self, args):
        # Re-emit an OBJ construction (klass + args between MARKs).
        self._f.write(pickle.MARK)
        while args:
            self._save(args)
        return pickle.OBJ

    def _instantiate(self, klass, k):
        args = self.stack[k+1:]
        self.stack[k:] = self._obj(klass, *args),

    del dispatch[pickle.NEWOBJ] # ZODB has never used protocol 2

    @_noload
    def find_class(self, args):
        # Re-emit a GLOBAL reference instead of importing the class.
        module, name = args
        return pickle.GLOBAL + module + '\n' + name + '\n'

    @_noload
    def _reduce(self, args):
        # Re-emit callable + argument tuple followed by REDUCE.
        self._save(args)
        self._save(args)
        return pickle.REDUCE

    def load_reduce(self):
        stack = self.stack
        args = stack.pop()
        stack[-1] = self._reduce(stack[-1], args)
    dispatch[pickle.REDUCE] = load_reduce

    def load_build(self):
        # Attach the state to the placeholder instead of applying it.
        stack = self.stack
        state = stack.pop()
        inst = stack[-1]
        assert inst.state is None
        inst.state = state
    dispatch[pickle.BUILD] = load_build
class ZODB(object):
    """One source ZODB storage to import, with its oid shift and the
    mapping of its mount points into other ZODBs."""

    def __init__(self, storage, oid=0, **kw):
        # oid: base oid of this storage in the merged oid space.
        # kw: mountpoint name -> oid (in this storage) of the mount.
        self.oid = int(oid)
        self.mountpoints = {k: int(v) for k, v in kw.iteritems()}
        self.connect(storage)
        self.ltid = util.u64(self.lastTransaction())
        if not self.ltid:
            raise DatabaseFailure("Can not import empty storage: %s" % storage)
        self.mapping = {}

    def __getstate__(self):
        # Volatile attributes are not pickled; connect() restores them.
        state = self.__dict__.copy()
        del state["data_tid"], state["storage"]
        return state

    def connect(self, storage):
        self.data_tid = {}
        self.storage = storageFromString(storage)

    def setup(self, zodb_dict, shift_oid=0):
        """Assign oid shifts recursively over mounted ZODBs and fill
        self.mapping (local mount oid -> merged oid).  Return the next
        free shift."""
        self.shift_oid = shift_oid
        self.next_oid = util.u64(self.new_oid())
        shift_oid += self.next_oid
        for mp, oid in self.mountpoints.iteritems():
            mp = zodb_dict[mp]
            new_oid = mp.oid
            try:
                new_oid += mp.shift_oid
            except AttributeError:
                # Mounted ZODB not set up yet: do it now.
                new_oid += shift_oid
                shift_oid = mp.setup(zodb_dict, shift_oid)
            self.mapping[oid] = new_oid
        del self.mountpoints
        return shift_oid

    def repickle(self, data):
        """Rewrite the oids of all persistent references in `data`.

        On first call, self.repickle is replaced either by an identity
        function (nothing to map) or by a Repickler instance.
        """
        if not (self.shift_oid or self.mapping):
            self.repickle = lambda x: x
            return data
        u64 = util.u64
        p64 = util.p64
        def map_oid(obj):
            if isinstance(obj, tuple) and len(obj) == 2:
                oid = u64(obj[0])
                # If this oid pointed to a mount point, drop 2nd item because
                # it's probably different than the real class of the new oid.
            elif isinstance(obj, str):
                oid = u64(obj)
            else:
                raise NotImplementedError(
                    "Unsupported external reference: %r" % obj)
            try:
                return p64(self.mapping[oid])
            except KeyError:
                if not self.shift_oid:
                    return obj # common case for root db
                oid = p64(oid + self.shift_oid)
                return oid if isinstance(obj, str) else (oid, obj[1])
        self.repickle = Repickler(map_oid)
        return self.repickle(data)

    def __getattr__(self, attr):
        # Delegate everything else to the underlying ZODB storage.
        if attr == '__setstate__':
            return object.__getattribute__(self, attr)
        return getattr(self.storage, attr)

    def getDataTid(self, oid, tid):
        """Return the data_txn of (oid, tid), caching the whole
        transaction's data-tid map on first access."""
        try:
            return self.data_tid[tid].get(oid)
        except KeyError:
            assert tid not in self.data_tid, (oid, tid)
            p_tid = util.p64(tid)
            txn = next(self.storage.iterator(p_tid))
            if txn.tid != p_tid:
                raise
            u64 = util.u64
            txn = self.data_tid[tid] = {
                u64(x.oid): x.data_txn
                for x in txn if x.data_txn}
            return txn.get(oid)
class ZODBIterator(object):
    """Cursor over the transactions of one wrapped ZODB storage.

    The current transaction is exposed as self.transaction (its tid as
    the `tid` property); `next` advances it and raises StopIteration
    when exhausted.  Instances order by (tid, zodb.shift_oid) so that a
    heap/sort can merge several sources.
    """

    def __init__(self, zodb, *args, **kw):
        transactions = zodb.iterator(*args, **kw)
        def _next():
            self.transaction = next(transactions)
        _next()  # raises StopIteration for an empty iterator
        self.zodb = zodb
        self.next = _next

    tid = property(lambda self: self.transaction.tid)

    def __lt__(self, other):
        if self.tid != other.tid:
            return self.tid < other.tid
        return self.zodb.shift_oid < other.zodb.shift_oid
class ImporterDatabaseManager(DatabaseManager):
    """Proxy that transparently imports data from a ZODB storage
    """
    # Time of the last commit(); used to commit at most about once per
    # second during the import (see finish() in _import).
    _last_commit = 0

    def __init__(self, *args, **kw):
        super(ImporterDatabaseManager, self).__init__(*args, **kw)
        self.db._connect()

    # Share the volatile-data refcount dict of the destination backend.
    _uncommitted_data = property(
        lambda self: self.db._uncommitted_data,
        lambda self, value: setattr(self.db, "_uncommitted_data", value))

    def _parse(self, database):
        # 'database' is the path of a configuration file: the first
        # section describes the destination backend, the following ones
        # the ZODBs to import.
        config = SafeConfigParser()
        config.read(database)
        sections = config.sections()
        # XXX: defaults copy & pasted from elsewhere - refactoring needed
        main = {'adapter': 'MySQL', 'wait': 0}
        main.update(config.items(sections.pop(0)))
        self.zodb = ((x, dict(config.items(x))) for x in sections)
        self.compress = main.get('compress', 1)
        self.db = buildDatabaseManager(main['adapter'],
            (main['database'], main.get('engine'), main['wait']))
        # These methods are forwarded as-is to the destination backend.
        for x in """query erase getConfiguration _setConfiguration
                    getPartitionTable changePartitionTable getUnfinishedTIDList
                    dropUnfinishedData storeTransaction finishTransaction
                    storeData
                 """.split():
            setattr(self, x, getattr(self.db, x))

    def commit(self):
        self.db.commit()
        self._last_commit = time.time()

    def setNumPartitions(self, num_partitions):
        self.db.setNumPartitions(num_partitions)
        # Invalidate the cached partition function (rebuilt lazily by
        # DatabaseManager.__getattr__).
        try:
            del self._getPartition
        except AttributeError:
            pass

    def close(self):
        self.db.close()
        if isinstance(self.zodb, list): # _setup called
            # NOTE(review): _setup assigns self.zodb from zip(), which
            # yields tuples — confirm this test ever matches after
            # _setup ran.
            for zodb in self.zodb:
                zodb.close()

    def _setup(self):
        """Load or build the oid mapping of the ZODBs to import, then
        prepare the background import generator."""
        self.db._setup()
        zodb_state = self.getConfiguration("zodb")
        if zodb_state:
            logging.warning("Ignoring configuration file for oid mapping."
                            " Reloading it from NEO storage.")
            zodb = cPickle.loads(zodb_state)
            for k, v in self.zodb:
                zodb[k].connect(v["storage"])
        else:
            zodb = {k: ZODB(**v) for k, v in self.zodb}
            # Exactly one root db (oid == 0) drives the recursive setup.
            x, = (x for x in zodb.itervalues() if not x.oid)
            x.setup(zodb)
            self.setConfiguration("zodb", cPickle.dumps(zodb))
        self.zodb_index, self.zodb = zip(*sorted(
            (x.shift_oid, x) for x in zodb.itervalues()))
        self.zodb_ltid = max(x.ltid for x in self.zodb)
        zodb = self.zodb[-1]
        self.zodb_loid = zodb.shift_oid + zodb.next_oid - 1
        # Resume the import where it was left.
        self.zodb_tid = self.db.getLastTID(self.zodb_ltid) or 0
        self._import = self._import()

    def doOperation(self, app):
        # Schedule the background import task, unless already finished.
        if self._import:
            app.newTask(self._import)

    def _import(self):
        """Generator merging all source ZODBs in tid order and copying
        their transactions into the destination backend, yielding
        regularly so the main loop can process other events."""
        p64 = util.p64
        u64 = util.u64
        tid = p64(self.zodb_tid + 1)
        zodb_list = []
        for zodb in self.zodb:
            try:
                zodb_list.append(ZODBIterator(zodb, tid, p64(self.zodb_ltid)))
            except StopIteration:
                pass
        tid = None

        def finish():
            # Flush the currently merged transaction, if any.
            if tid:
                self.storeTransaction(tid, object_list, (
                    (x[0] for x in object_list),
                    str(txn.user), str(txn.description),
                    cPickle.dumps(txn.extension), False, tid), False)
                self.releaseData(data_id_list)
                logging.debug("TXN %s imported (user=%r, desc=%r, len(oid)=%s)",
                    util.dump(tid), txn.user, txn.description,
                    len(object_list))
                del object_list[:], data_id_list[:]
                if self._last_commit + 1 < time.time():
                    self.commit()
                self.zodb_tid = u64(tid)
        if self.compress:
            from zlib import compress
        else:
            compress = None
            compression = 0
        object_list = []
        data_id_list = []
        while zodb_list:
            zodb_list.sort()
            z = zodb_list[0]
            # Merge transactions with same tid. Only
            # user/desc/ext from first ZODB are kept.
            if tid != z.tid:
                finish()
                txn = z.transaction
                tid = txn.tid
                yield 1
            zodb = z.zodb
            for r in z.transaction:
                oid = p64(u64(r.oid) + zodb.shift_oid)
                data_tid = r.data_txn
                if data_tid or r.data is None:
                    data_id = None
                else:
                    data = zodb.repickle(r.data)
                    if compress:
                        compressed_data = compress(data)
                        compression = len(compressed_data) < len(data)
                        if compression:
                            data = compressed_data
                    # NOTE(review): 'checksum' is unused and
                    # makeChecksum() is computed a second time just
                    # below — looks redundant.
                    checksum = util.makeChecksum(data)
                    data_id = self.holdData(util.makeChecksum(data), data,
                        compression)
                    data_id_list.append(data_id)
                object_list.append((oid, data_id, data_tid))
                # Give the main loop the opportunity to process requests
                # from other nodes. In particular, clients may commit. If the
                # storage node exits after such commit, and before we actually
                # update 'obj' with 'object_list', some rows in 'data' may be
                # unreferenced. This is not a problem because the leak is
                # solved when resuming the migration.
                yield 1
            try:
                z.next()
            except StopIteration:
                del zodb_list[0]
        self._last_commit = 0
        finish()
        logging.warning("All data are imported. You should change"
            " your configuration to use the native backend and restart.")
        self._import = None
        # From now on, serve reads directly from the backend.
        for x in """getObject objectPresent getReplicationTIDList
                 """.split():
            setattr(self, x, getattr(self.db, x))

    def inZodb(self, oid, tid=None, before_tid=None):
        """Return whether the given unpacked oid/tid may still have to
        be looked up in an imported ZODB rather than in the backend."""
        return oid <= self.zodb_loid and (
            self.zodb_tid < before_tid if before_tid else
            tid is None or self.zodb_tid < tid <= self.zodb_ltid)

    def zodbFromOid(self, oid):
        """Map an unpacked NEO oid to (source zodb, oid in that zodb)."""
        zodb = self.zodb[bisect(self.zodb_index, oid) - 1]
        return zodb, oid - zodb.shift_oid

    def getLastIDs(self, all=True):
        # Take the imported ZODBs into account for last tid/oid.
        tid, _, _, oid = self.db.getLastIDs(all)
        return (max(tid, util.p64(self.zodb_ltid)), None, None,
                max(oid, util.p64(self.zodb_loid)))

    def objectPresent(self, oid, tid, all=True):
        r = self.db.objectPresent(oid, tid, all)
        if not r:
            # Not (yet) in the backend: look in the source ZODBs.
            u_oid = util.u64(oid)
            u_tid = util.u64(tid)
            if self.inZodb(u_oid, u_tid):
                zodb, oid = self.zodbFromOid(u_oid)
                try:
                    return zodb.loadSerial(util.p64(oid), tid)
                except POSKeyError:
                    pass

    def getObject(self, oid, tid=None, before_tid=None):
        """Same contract as DatabaseManager.getObject, but records not
        yet imported are read from the source ZODBs."""
        u64 = util.u64
        u_oid = u64(oid)
        u_tid = tid and u64(tid)
        u_before_tid = before_tid and u64(before_tid)
        db = self.db
        if self.zodb_tid < (u_before_tid - 1 if before_tid else
                            u_tid or 0) <= self.zodb_ltid:
            # Requested revision is certainly not imported yet.
            o = None
        else:
            o = db.getObject(oid, tid, before_tid)
            if o and self.zodb_ltid < u64(o[0]) or \
               not self.inZodb(u_oid, u_tid, u_before_tid):
                return o
        p64 = util.p64
        zodb, z_oid = self.zodbFromOid(u_oid)
        try:
            value, serial, next_serial = zodb.loadBefore(p64(z_oid),
                before_tid or (util.p64(u_tid + 1) if tid else MAX_TID))
        except TypeError: # loadBefore returned None
            return False
        except POSKeyError:
            assert not o, o
            return o
        if serial != tid:
            if tid:
                return False
            u_tid = u64(serial)
        if u_tid <= self.zodb_tid and o:
            return o
        if value:
            value = zodb.repickle(value)
            checksum = util.makeChecksum(value)
        else:
            # CAVEAT: Although we think loadBefore should not return an empty
            #         value for a deleted object (see comment in NEO Storage),
            #         there's no need to distinguish this case in the above
            #         except clause because it would be crazy to import a
            #         NEO DB using this backend.
            checksum = None
        return (serial, next_serial or
                db._getNextTID(db._getPartition(u_oid), u_oid, u_tid),
            0, checksum, value, zodb.getDataTid(z_oid, u_tid))

    def getTransaction(self, tid, all=False):
        """Return transaction metadata, from the source ZODBs when the
        tid is in the not-yet-imported range."""
        u64 = util.u64
        if self.zodb_tid < u64(tid) <= self.zodb_ltid:
            for zodb in self.zodb:
                for txn in zodb.iterator(tid, tid):
                    p64 = util.p64
                    shift_oid = zodb.shift_oid
                    return ([p64(u64(x.oid) + shift_oid) for x in txn],
                        txn.user, txn.description,
                        cPickle.dumps(txn.extension), 0, tid)
        else:
            return self.db.getTransaction(tid, all)

    def getReplicationTIDList(self, min_tid, max_tid, length, partition):
        """Merge tids from the backend (already imported range) and the
        source ZODBs (remaining range), up to `length` entries."""
        p64 = util.p64
        tid = p64(self.zodb_tid)
        if min_tid <= tid:
            r = self.db.getReplicationTIDList(min_tid, min(max_tid, tid),
                length, partition)
            if max_tid <= tid:
                return r
            length -= len(r)
            min_tid = p64(self.zodb_tid + 1)
        else:
            r = []
        if length:
            tid = p64(self.zodb_ltid)
            if min_tid <= tid:
                u64 = util.u64
                def next_tid(i):
                    # Push the next tid of iterator 'i' matching the
                    # partition onto the (max-)heap 'z'.
                    for txn in i:
                        tid = u64(txn.tid)
                        if self._getPartition(tid) == partition:
                            insort(z, (-tid, i))
                            break
                z = []
                for zodb in self.zodb:
                    next_tid(zodb.iterator(min_tid, min(max_tid, tid)))
                while z:
                    t, i = z.pop()
                    r.append(p64(-t))
                    length -= 1
                    if not length:
                        return r
                    next_tid(i)
            if tid < max_tid:
                r += self.db.getReplicationTIDList(max(min_tid, tid), max_tid,
                    length, partition)
        return r
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/database/manager.py 0000664 0000000 0000000 00000052020 12601037530 0027050 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from collections import defaultdict
from functools import wraps
from neo.lib import logging, util
from neo.lib.exception import DatabaseFailure
from neo.lib.protocol import ZERO_TID, BackendNotImplemented
def lazymethod(func):
    """Property that, on first access from a subclass instance, calls
    `func` once to build the real implementation and installs it on the
    instance's class under the same name (shadowing this property)."""
    def getter(instance):
        klass = instance.__class__
        attr = func.__name__
        # The real implementation must not have been installed yet.
        assert attr not in klass.__dict__
        setattr(klass, attr, func(instance))
        return getattr(instance, attr)
    return property(getter, doc=func.__doc__)
def fallback(func):
    """Decorator for generic (slow) implementations: on first use, a
    message is logged inviting the backend to override the method, then
    `func` itself is installed via lazymethod."""
    @wraps(func)
    def warn(self):
        logging.info("Fallback to generic/slow implementation of %s."
            " It should be overridden by backend storage (%s).",
            func.__name__, self.__class__.__name__)
        return func
    return lazymethod(warn)
def splitOIDField(tid, oids):
    """Split a raw concatenation of 8-byte oids into a list of oids.

    tid: transaction the field belongs to (only used in the error
         message).
    oids: string whose length must be a multiple of 8.
    Raises DatabaseFailure on an invalid length.  Note: DatabaseFailure
    was not imported by this module, so this error path used to raise
    NameError instead — fixed by importing it (see module imports).
    """
    if len(oids) % 8:
        raise DatabaseFailure('invalid oids length for tid %s: %s'
            % (tid, len(oids)))
    # 'range' instead of 'xrange': identical behaviour here, and also
    # forward-compatible.
    return [oids[i:i+8] for i in range(0, len(oids), 8)]
class CreationUndone(Exception):
    # NOTE(review): raised by backends, presumably when data is queried
    # for an object whose creation was undone — confirm at raise sites
    # (not visible in this chunk).
    pass
class DatabaseManager(object):
    """This class only describes an interface for database managers."""

    # Names of supported storage engines; empty by default, overridden
    # by backends that accept an 'engine' argument (see __init__).
    ENGINES = ()
    def __init__(self, database, engine=None, wait=0):
        """
        Initialize the object.

        database: backend-specific connection string, handed to _parse.
        engine: optional storage engine name, validated against ENGINES.
                Note that self._engine is only set when an engine is
                given.
        wait: stored as self._wait; semantics are backend-specific.
        """
        if engine:
            if engine not in self.ENGINES:
                raise ValueError("Unsupported engine: %r not in %r"
                    % (engine, self.ENGINES))
            self._engine = engine
        self._wait = wait
        self._parse(database)
    def __getattr__(self, attr):
        # Lazily build '_getPartition' (oid/tid -> partition index) from
        # the current number of partitions and cache it on the instance;
        # it is invalidated by deleting the attribute whenever the
        # number of partitions changes (see setNumPartitions).
        if attr == "_getPartition":
            np = self.getNumPartitions()
            value = lambda x: x % np
        else:
            # Preserve normal AttributeError behaviour otherwise.
            return self.__getattribute__(attr)
        setattr(self, attr, value)
        return value
    def _parse(self, database):
        """Called during instantiation, to process database parameter."""
        pass
    def setup(self, reset=0):
        """Set up a database, discarding existing data first if reset is True
        """
        if reset:
            self.erase()
        # Volatile refcounts of the data of write-locked objects; the
        # backend's _setup must repopulate it (see its docstring).
        self._uncommitted_data = defaultdict(int)
        self._setup()
    def _setup(self):
        """To be overridden by the backend to set up a database

        It must recover self._uncommitted_data from temporary object table.
        _uncommitted_data is already instantiated and must be updated with
        refcounts to data of write-locked objects, except in case of undo,
        where the refcount is increased later, when the object is read-locked.
        Keys are data ids and values are number of references.
        """
        raise NotImplementedError
    def doOperation(self, app):
        # Optional backend hook; no-op by default (overridden e.g. by
        # the Importer backend to schedule its import task).
        pass
    def commit(self):
        # No-op by default; backends that buffer writes override this.
        pass
    def getConfiguration(self, key):
        """
        Return a configuration value, returns None if not found or not set
        """
        raise NotImplementedError
    def setConfiguration(self, key, value):
        """
        Set a configuration value and commit it immediately.
        """
        self._setConfiguration(key, value)
        self.commit()
    def _setConfiguration(self, key, value):
        # Backend hook actually storing the value (no commit).
        raise NotImplementedError
def getUUID(self):
"""
Load a NID from a database.
"""
nid = self.getConfiguration('nid')
if nid is not None:
return int(nid)
    def setUUID(self, nid):
        """
        Store a NID into a database (as a string, like every
        configuration value).
        """
        self.setConfiguration('nid', str(nid))
def getNumPartitions(self):
"""
Load the number of partitions from a database.
"""
n = self.getConfiguration('partitions')
if n is not None:
return int(n)
    def setNumPartitions(self, num_partitions):
        """
        Store the number of partitions into a database.
        """
        self.setConfiguration('partitions', num_partitions)
        # Invalidate the cached partition function; it is rebuilt with
        # the new value on next access (see __getattr__).
        try:
            del self._getPartition
        except AttributeError:
            pass
def getNumReplicas(self):
"""
Load the number of replicas from a database.
"""
n = self.getConfiguration('replicas')
if n is not None:
return int(n)
    def setNumReplicas(self, num_replicas):
        """
        Store the number of replicas into a database.
        """
        self.setConfiguration('replicas', num_replicas)
    def getName(self):
        """
        Load a name from a database (None if never set).
        """
        return self.getConfiguration('name')
    def setName(self, name):
        """
        Store a name into a database.
        """
        self.setConfiguration('name', name)
    def getPTID(self):
        """
        Load a Partition Table ID from a database (None if never set).
        """
        ptid = self.getConfiguration('ptid')
        if ptid is not None:
            return long(ptid)
    def setPTID(self, ptid):
        """
        Store a Partition Table ID into a database.  A None value is
        stored as-is (clearing the setting).
        """
        if ptid is not None:
            assert isinstance(ptid, (int, long)), ptid
            ptid = str(ptid)
        self.setConfiguration('ptid', ptid)
    def getBackupTID(self):
        # util.bin converts the stored string back to a packed tid.
        return util.bin(self.getConfiguration('backup_tid'))
    def setBackupTID(self, backup_tid):
        # Stored in dumped (hex) form; symmetric with getBackupTID.
        tid = util.dump(backup_tid)
        logging.debug('backup_tid = %s', tid)
        return self.setConfiguration('backup_tid', tid)
    def _setPackTID(self, tid):
        # Remember up to which tid the database was packed.
        self._setConfiguration('_pack_tid', tid)
    def _getPackTID(self):
        try:
            return int(self.getConfiguration('_pack_tid'))
        except TypeError:
            # int(None): no pack ever happened.
            return -1
    def getPartitionTable(self):
        """Return a whole partition table as a sequence of rows. Each row
        is again a tuple of an offset (row ID), the NID of a storage
        node, and a cell state."""
        raise NotImplementedError
    def getLastTID(self, max_tid):
        """Return greatest tid in trans table that is <= given 'max_tid'

        Required only to import a DB using Importer backend.
        max_tid must be in unpacked format.
        """
        raise NotImplementedError
    def _getLastIDs(self, all=True):
        # Backend hook for getLastIDs; must return (trans, obj, oid),
        # where trans and obj map to their greatest tids (presumably
        # keyed by partition — confirm in backends).
        raise NotImplementedError
    def getLastIDs(self, all=True):
        """Return (last tid, trans dict, obj dict, last oid) from the
        backend's _getLastIDs; the last tid is the max over both dicts
        (None when both are empty)."""
        # NOTE(review): the 'all' argument is not forwarded to
        # self._getLastIDs() — confirm whether it should be.
        trans, obj, oid = self._getLastIDs()
        if trans:
            tid = max(trans.itervalues())
            if obj:
                tid = max(tid, max(obj.itervalues()))
        else:
            tid = max(obj.itervalues()) if obj else None
        return tid, trans, obj, oid
    def getUnfinishedTIDList(self):
        """Return a list of unfinished transaction's IDs."""
        raise NotImplementedError
    def objectPresent(self, oid, tid, all = True):
        """Return true iff an object specified by a given pair of an
        object ID and a transaction ID is present in a database.
        Otherwise, return false. If all is true, the object must be
        searched from unfinished transactions as well."""
        raise NotImplementedError
    @fallback
    def getLastObjectTID(self, oid):
        """Return the latest tid of given oid or None if it does not exist"""
        # Generic implementation via getObject; backends should provide
        # a direct query (see the fallback decorator).
        r = self.getObject(oid)
        return r and r[0]
    def _getNextTID(self, partition, oid, tid):
        """
        partition (int)
            Must be the result of self._getPartition(oid)
        oid (int)
            Identifier of object to retrieve.
        tid (int)
            Exact serial to retrieve.

        If tid is the last revision of oid, None is returned.
        """
        raise NotImplementedError
    def _getObject(self, oid, tid=None, before_tid=None):
        """
        Backend counterpart of getObject, working on unpacked values.

        oid (int)
            Identifier of object to retrieve.
        tid (int, None)
            Exact serial to retrieve.
        before_tid (packed, None)
            Serial to retrieve is the highest existing one strictly below this
            value.
        """
        raise NotImplementedError
    def getObject(self, oid, tid=None, before_tid=None):
        """
        oid (packed)
            Identifier of object to retrieve.
        tid (packed, None)
            Exact serial to retrieve.
        before_tid (packed, None)
            Serial to retrieve is the highest existing one strictly below this
            value.

        Return value:
            None: Given oid doesn't exist in database.
            False: No record found, but another one exists for given oid.
            6-tuple: Record content.
                - record serial (packed)
                - serial or next record modifying object (packed, None)
                - compression (boolean-ish, None)
                - checksum (integer, None)
                - data (binary string, None)
                - data_serial (packed, None)
        """
        u64 = util.u64
        r = self._getObject(u64(oid), tid and u64(tid),
            before_tid and u64(before_tid))
        try:
            serial, next_serial, compression, checksum, data, data_serial = r
        except TypeError:
            # r is not a tuple (no record found).
            # See if object exists at all: False if another revision
            # exists, None otherwise.
            return (tid or before_tid) and self.getLastObjectTID(oid) and False
        return (util.p64(serial),
                None if next_serial is None else util.p64(next_serial),
                compression, checksum, data,
                None if data_serial is None else util.p64(data_serial))
    def changePartitionTable(self, ptid, cell_list, reset=False):
        """Change a part of a partition table. The list of cells is
        a tuple of tuples, each of which consists of an offset (row ID),
        the NID of a storage node, and a cell state. The Partition
        Table ID must be stored as well. If reset is True, existing data
        is first thrown away."""
        raise NotImplementedError
    def dropPartitions(self, offset_list):
        """Delete all data for specified partitions"""
        raise NotImplementedError
    def dropUnfinishedData(self):
        """Drop any unfinished data from a database."""
        raise NotImplementedError
    def storeTransaction(self, tid, object_list, transaction, temporary = True):
        """Store a transaction temporarily, if temporary is true. Note
        that this transaction is not finished yet. The list of objects
        contains tuples, each of which consists of an object ID,
        a data_id and object serial.
        The transaction is either None or a tuple of the list of OIDs,
        user information, a description, extension information and transaction
        pack state (True for packed)."""
        raise NotImplementedError
    def _pruneData(self, data_id_list):
        """To be overridden by the backend to delete any unreferenced data

        'unreferenced' means:
        - not in self._uncommitted_data
        - and not referenced by a fully-committed object (storage should have
          an index or a refcount of all data ids of all objects)
        """
        raise NotImplementedError
    def storeData(self, checksum, data, compression):
        """To be overridden by the backend to store object raw data

        If same data was already stored, the storage only has to check there's
        no hash collision.
        """
        raise NotImplementedError
    def holdData(self, checksum_or_id, *args):
        """Store raw data of temporary object

        If 'checksum_or_id' is a checksum, it must be the result of
        makeChecksum(data) and extra parameters must be (data, compression)
        where 'compression' indicates if 'data' is compressed.
        A volatile reference is set to this data until 'releaseData' is called
        with this checksum.
        If called with only an id, it only increments the volatile
        reference to the data matching the id.
        """
        if args:
            # Store first; storeData returns the data id.
            checksum_or_id = self.storeData(checksum_or_id, *args)
        self._uncommitted_data[checksum_or_id] += 1
        return checksum_or_id
    def releaseData(self, data_id_list, prune=False):
        """Release 1 volatile reference to given list of data ids

        If 'prune' is true, any data that is not referenced anymore (either by
        a volatile reference or by a fully-committed object) is deleted.
        """
        refcount = self._uncommitted_data
        for data_id in data_id_list:
            count = refcount[data_id] - 1
            if count:
                refcount[data_id] = count
            else:
                # Last volatile reference: drop the entry entirely.
                del refcount[data_id]
        if prune:
            self._pruneData(data_id_list)
            self.commit()
    @fallback
    def _getDataTID(self, oid, tid=None, before_tid=None):
        """
        Return a 2-tuple:
        tid (int)
            tid corresponding to received parameters
        serial
            data tid of the found record

        (None, None) is returned if requested object and transaction
        could not be found.

        This method only exists for performance reasons, by not returning data:
        _getObject already returns these values but it is slower.
        """
        r = self._getObject(oid, tid, before_tid)
        return (r[0], r[-1]) if r else (None, None)
    def findUndoTID(self, oid, tid, ltid, undone_tid, transaction_object):
        """
        oid
            Object OID
        tid
            Transaction doing the undo
        ltid
            Upper (excluded) bound of transactions visible to transaction doing
            the undo.
        undone_tid
            Transaction to undo
        transaction_object
            Object data from memory, if it was modified by running
            transaction.
            None if it was not modified by running transaction.

        Returns a 3-tuple:
        current_tid (p64)
            TID of most recent version of the object client's transaction can
            see. This is used later to detect current conflicts (eg, another
            client modifying the same object in parallel)
        data_tid (int)
            TID containing (without indirection) the data prior to undone
            transaction.
            None if object doesn't exist prior to transaction being undone
            (its creation is being undone).
        is_current (bool)
            False if object was modified by later transaction (ie, data_tid is
            not current), True otherwise.
        """
        u64 = util.u64
        p64 = util.p64
        oid = u64(oid)
        tid = u64(tid)
        if ltid:
            ltid = u64(ltid)
        undone_tid = u64(undone_tid)

        def getDataTID(tid=None, before_tid=None):
            # Follow value_tid backpointers until the record that really
            # stores the data is reached.
            tid, data_tid = self._getDataTID(oid, tid, before_tid)
            current_tid = tid
            while data_tid:
                if data_tid < tid:
                    tid, data_tid = self._getDataTID(oid, data_tid)
                    if tid is not None:
                        continue
                # Broken backpointer (forward-pointing or dangling): log it
                # and fall back to the record itself.
                logging.error("Incorrect data serial for oid %s at tid %s",
                              oid, current_tid)
                return current_tid, current_tid
            return current_tid, tid

        if transaction_object:
            # Object was modified by the running transaction itself.
            current_tid = current_data_tid = u64(transaction_object[2])
        else:
            current_tid, current_data_tid = getDataTID(before_tid=ltid)
        if current_tid is None:
            return (None, None, False)
        found_undone_tid, undone_data_tid = getDataTID(tid=undone_tid)
        assert found_undone_tid is not None, (oid, undone_tid)
        is_current = undone_data_tid in (current_data_tid, tid)
        # Load object data as it was before given transaction.
        # It can be None, in which case it means we are undoing object
        # creation.
        _, data_tid = getDataTID(before_tid=undone_tid)
        if data_tid is not None:
            data_tid = p64(data_tid)
        return p64(current_tid), data_tid, is_current
    def finishTransaction(self, tid):
        """Finish a transaction specified by a given ID, by moving
        temporary data to a finished area."""
        # Abstract: backends must implement this.
        raise NotImplementedError

    def deleteTransaction(self, tid, oid_list=()):
        """Delete a transaction and its content specified by a given ID and
        an oid list"""
        # Abstract: backends must implement this.
        raise NotImplementedError

    def deleteObject(self, oid, serial=None):
        """Delete given object. If serial is given, only delete that serial for
        given oid."""
        # Abstract: backends must implement this.
        raise NotImplementedError

    def _deleteRange(self, partition, min_tid=None, max_tid=None):
        """Delete all objects and transactions between given min_tid (excluded)
        and max_tid (included)"""
        # Abstract: backends must implement this.
        raise NotImplementedError
def truncate(self, tid):
assert tid not in (None, ZERO_TID), tid
assert self.getBackupTID()
self.setBackupTID(None) # XXX
for partition in xrange(self.getNumPartitions()):
self._deleteRange(partition, tid)
self.commit()
    def getTransaction(self, tid, all = False):
        """Return a tuple of the list of OIDs, user information,
        a description, and extension information, for a given transaction
        ID. If there is no such transaction ID in a database, return None.
        If all is true, the transaction must be searched from a temporary
        area as well."""
        # Abstract: backends must implement this.
        raise NotImplementedError

    def getObjectHistory(self, oid, offset, length):
        """Return a list of serials and sizes for a given object ID.
        The length specifies the maximum size of such a list. Result starts
        with latest serial, and the list must be sorted in descending order.
        If there is no such object ID in a database, return None."""
        # Optional feature: raises a dedicated error instead of
        # NotImplementedError so callers can report it as unsupported.
        raise BackendNotImplemented(self.getObjectHistory)

    def getReplicationObjectList(self, min_tid, max_tid, length, partition,
            min_oid):
        """Return a dict of length oids grouped by serial at (or above)
        min_tid and min_oid and below max_tid, for given partition,
        sorted in ascending order."""
        # Abstract: backends must implement this.
        raise NotImplementedError

    def getTIDList(self, offset, length, partition_list):
        """Return a list of TIDs in ascending order from an offset,
        at most the specified length. The list of partitions are passed
        to filter out non-applicable TIDs."""
        # Optional feature, like getObjectHistory.
        raise BackendNotImplemented(self.getTIDList)

    def getReplicationTIDList(self, min_tid, max_tid, length, partition):
        """Return a list of TIDs in ascending order from an initial tid value,
        at most the specified length up to max_tid. The partition number is
        passed to filter out non-applicable TIDs."""
        # Abstract: backends must implement this.
        raise NotImplementedError

    def pack(self, tid, updateObjectDataForPack):
        """Prune all non-current object revisions at given tid.
        updateObjectDataForPack is a function called for each deleted object
        and revision with:
        - OID
        - packed TID
        - new value_serial
            If object data was moved to an after-pack-tid revision, this
            parameter contains the TID of that revision, allowing to backlink
            to it.
        - getObjectData function
            To call if value_serial is None and an object needs to be updated.
            Takes no parameter, returns a 3-tuple: compression, data_id,
            value
        """
        # Abstract: backends must implement this.
        raise NotImplementedError
    def checkTIDRange(self, partition, length, min_tid, max_tid):
        """
        Generate a digest from transaction list.
        min_tid (packed)
            TID at which verification starts.
        length (int)
            Maximum number of records to include in result.

        Returns a 3-tuple:
            - number of records actually found
            - a SHA1 computed from record's TID
              ZERO_HASH if no record found
            - biggest TID found (ie, TID of last record read)
              ZERO_TID if no record found
        """
        # Abstract: backends must implement this.
        raise NotImplementedError

    def checkSerialRange(self, partition, length, min_tid, max_tid, min_oid):
        """
        Generate a digest from object list.
        min_oid (packed)
            OID at which verification starts.
        min_tid (packed)
            Serial of min_oid object at which search should start.
        length
            Maximum number of records to include in result.

        Returns a 5-tuple:
            - number of records actually found
            - a SHA1 computed from record's OID
              ZERO_HASH if no record found
            - biggest OID found (ie, OID of last record read)
              ZERO_OID if no record found
            - a SHA1 computed from record's serial
              ZERO_HASH if no record found
            - biggest serial found for biggest OID found (ie, serial of last
              record read)
              ZERO_TID if no record found
        """
        # Abstract: backends must implement this.
        raise NotImplementedError
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/database/mysqldb.py 0000664 0000000 0000000 00000074344 12601037530 0027126 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from binascii import a2b_hex
import MySQLdb
from MySQLdb import DataError, IntegrityError, OperationalError
from MySQLdb.constants.CR import SERVER_GONE_ERROR, SERVER_LOST
from MySQLdb.constants.ER import DATA_TOO_LONG, DUP_ENTRY
from array import array
from hashlib import sha1
import re
import string
import struct
import time
from . import DatabaseManager, LOG_QUERIES
from .manager import CreationUndone, splitOIDField
from neo.lib import logging, util
from neo.lib.exception import DatabaseFailure
from neo.lib.protocol import CellStates, ZERO_OID, ZERO_TID, ZERO_HASH
def getPrintableQuery(query, max=70):
    """Return 'query' with unprintable characters escaped as \\xNN.

    Tab, vertical-tab, form-feed and carriage-return are escaped as well,
    so the result always fits on the log line it is embedded in.
    NOTE: the 'max' parameter is currently unused (callers truncate the
    query themselves before calling); it is kept for compatibility.
    """
    chars = []
    for c in query:
        if c in string.printable and c not in '\t\x0b\x0c\r':
            chars.append(c)
        else:
            chars.append('\\x%02x' % ord(c))
    return ''.join(chars)
class MySQLDatabaseManager(DatabaseManager):
    """This class manages a database on MySQL."""

    # Storage engines this backend knows how to create tables with.
    ENGINES = "InnoDB", "TokuDB"
    _engine = ENGINES[0] # default engine

    # Disabled even on MySQL 5.1-5.5 and MariaDB 5.2-5.3 because
    # 'select count(*) from obj' sometimes returns incorrect values
    # (tested with testOudatedCellsOnDownStorage).
    _use_partition = False

    def __init__(self, *args, **kw):
        super(MySQLDatabaseManager, self).__init__(*args, **kw)
        self.conn = None        # MySQLdb connection, set by _connect()
        self._config = {}       # cache of the 'config' table
        self._connect()
    def _parse(self, database):
        """ Get the database credentials (username, password, database) """
        # expected pattern : [user[:password]@]database[(.|/)unix_socket]
        self.user, self.passwd, self.db, self.socket = re.match(
            '(?:([^:]+)(?::(.*))?@)?([^./]+)(.+)?$', database).groups()

    def close(self):
        # Close the underlying MySQLdb connection.
        self.conn.close()
    def _connect(self):
        """Open the MySQL connection using the credentials parsed by
        _parse(), retrying every second until self._wait seconds elapsed
        (forever if self._wait < 0)."""
        kwd = {'db' : self.db, 'user' : self.user}
        if self.passwd is not None:
            kwd['passwd'] = self.passwd
        if self.socket:
            kwd['unix_socket'] = self.socket
        logging.info('connecting to MySQL on the database %s with user %s',
                     self.db, self.user)
        if self._wait < 0:
            timeout_at = None
        else:
            timeout_at = time.time() + self._wait
        while True:
            try:
                self.conn = MySQLdb.connect(**kwd)
                break
            except Exception:
                if timeout_at is not None and time.time() >= timeout_at:
                    raise
                logging.exception('Connection to MySQL failed, retrying.')
                time.sleep(1)
        self._active = 0        # no write pending since last commit
        self.conn.autocommit(False)
        # Raise group_concat_max_len so GROUP_CONCAT-based digests
        # (checkTIDRange) are never silently truncated.
        self.conn.query("SET SESSION group_concat_max_len = %u" % (2**32-1))
        self.conn.set_sql_mode("TRADITIONAL,NO_ENGINE_SUBSTITUTION")

    def commit(self):
        logging.debug('committing...')
        self.conn.commit()
        # Reset the dirty flag used by query() to decide whether a lost
        # connection may be transparently reconnected and retried.
        self._active = 0
    def query(self, query):
        """Query data from a database."""
        if LOG_QUERIES:
            logging.debug('querying %s...',
                getPrintableQuery(query.split('\n', 1)[0][:70]))
        while 1:
            conn = self.conn
            try:
                conn.query(query)
                if query.startswith("SELECT "):
                    r = conn.store_result()
                    # Normalize rows: MySQLdb may return BLOB columns as
                    # array.array; convert those to byte strings.
                    return tuple([
                        tuple([d.tostring() if isinstance(d, array) else d
                               for d in row])
                        for row in r.fetch_row(r.num_rows())])
                break
            except OperationalError, m:
                # Only a lost/gone server may be retried, and only if no
                # write was done since the last commit (self._active).
                if self._active or m[0] not in (SERVER_GONE_ERROR, SERVER_LOST):
                    raise DatabaseFailure('MySQL error %d: %s\nQuery: %s'
                        % (m[0], m[1], getPrintableQuery(query[:1000])))
                logging.info('the MySQL server is gone; reconnecting')
                self._connect()
        # Non-SELECT statement: remember data-modifying ones so that a
        # reconnection cannot silently replay half a transaction.
        r = query.split(None, 1)[0]
        if r in ("INSERT", "REPLACE", "DELETE", "UPDATE"):
            self._active = 1
        else:
            assert r in ("ALTER", "CREATE", "DROP", "TRUNCATE"), query

    @property
    def escape(self):
        """Escape special characters in a string."""
        return self.conn.escape_string
    def erase(self):
        # Drop every table managed by this backend.
        self.query(
            "DROP TABLE IF EXISTS config, pt, trans, obj, data, ttrans, tobj")

    def _setup(self):
        """Create any missing table, and reload from 'tobj' the volatile
        reference counts of uncommitted data (self._uncommitted_data)."""
        self._config.clear()
        q = self.query
        p = engine = self._engine
        # The table "config" stores configuration parameters which affect the
        # persistent data.
        q("""CREATE TABLE IF NOT EXISTS config (
                 name VARBINARY(255) NOT NULL PRIMARY KEY,
                 value VARBINARY(255) NULL
             ) ENGINE=""" + engine)
        # The table "pt" stores a partition table.
        q("""CREATE TABLE IF NOT EXISTS pt (
                 rid INT UNSIGNED NOT NULL,
                 nid INT NOT NULL,
                 state TINYINT UNSIGNED NOT NULL,
                 PRIMARY KEY (rid, nid)
             ) ENGINE=""" + engine)
        if self._use_partition:
            # 'trans' and 'obj' are then natively partitioned by NEO
            # partition number.
            p += """ PARTITION BY LIST (`partition`) (
                PARTITION dummy VALUES IN (NULL))"""
        # The table "trans" stores information on committed transactions.
        q("""CREATE TABLE IF NOT EXISTS trans (
                 `partition` SMALLINT UNSIGNED NOT NULL,
                 tid BIGINT UNSIGNED NOT NULL,
                 packed BOOLEAN NOT NULL,
                 oids MEDIUMBLOB NOT NULL,
                 user BLOB NOT NULL,
                 description BLOB NOT NULL,
                 ext BLOB NOT NULL,
                 ttid BIGINT UNSIGNED NOT NULL,
                 PRIMARY KEY (`partition`, tid)
             ) ENGINE=""" + p)
        # The table "obj" stores committed object metadata.
        q("""CREATE TABLE IF NOT EXISTS obj (
                 `partition` SMALLINT UNSIGNED NOT NULL,
                 oid BIGINT UNSIGNED NOT NULL,
                 tid BIGINT UNSIGNED NOT NULL,
                 data_id BIGINT UNSIGNED NULL,
                 value_tid BIGINT UNSIGNED NULL,
                 PRIMARY KEY (`partition`, tid, oid),
                 KEY (`partition`, oid, tid),
                 KEY (data_id)
             ) ENGINE=""" + p)
        if engine == "TokuDB":
            engine += " compression='tokudb_uncompressed'"
        # The table "data" stores object data.
        # We'd like to have partial index on 'hash' column (e.g. hash(4))
        # but 'UNIQUE' constraint would not work as expected.
        q("""CREATE TABLE IF NOT EXISTS data (
                 id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
                 hash BINARY(20) NOT NULL,
                 compression TINYINT UNSIGNED NULL,
                 value MEDIUMBLOB NOT NULL,
                 UNIQUE (hash, compression)
             ) ENGINE=""" + engine)
        # Data bigger than the MEDIUMBLOB limit is split into rows of this
        # table; 'data.value' then only holds a (bigdata_id, length) header.
        q("""CREATE TABLE IF NOT EXISTS bigdata (
                 id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
                 value MEDIUMBLOB NOT NULL
             ) ENGINE=""" + engine)
        # The table "ttrans" stores information on uncommitted transactions.
        q("""CREATE TABLE IF NOT EXISTS ttrans (
                 `partition` SMALLINT UNSIGNED NOT NULL,
                 tid BIGINT UNSIGNED NOT NULL,
                 packed BOOLEAN NOT NULL,
                 oids MEDIUMBLOB NOT NULL,
                 user BLOB NOT NULL,
                 description BLOB NOT NULL,
                 ext BLOB NOT NULL,
                 ttid BIGINT UNSIGNED NOT NULL
             ) ENGINE=""" + engine)
        # The table "tobj" stores uncommitted object metadata.
        q("""CREATE TABLE IF NOT EXISTS tobj (
                 `partition` SMALLINT UNSIGNED NOT NULL,
                 oid BIGINT UNSIGNED NOT NULL,
                 tid BIGINT UNSIGNED NOT NULL,
                 data_id BIGINT UNSIGNED NULL,
                 value_tid BIGINT UNSIGNED NULL,
                 PRIMARY KEY (tid, oid)
             ) ENGINE=""" + engine)
        # Rebuild the volatile refcounts of data held by unfinished
        # transactions that survived a restart.
        self._uncommitted_data.update(q("SELECT data_id, count(*)"
            " FROM tobj WHERE data_id IS NOT NULL GROUP BY data_id"))
    def getConfiguration(self, key):
        """Return the value of a persistent configuration parameter,
        caching results (misses are cached as None)."""
        try:
            return self._config[key]
        except KeyError:
            sql_key = self.escape(str(key))
            try:
                r = self.query("SELECT value FROM config WHERE name = '%s'"
                               % sql_key)[0][0]
            except IndexError:
                r = None
            self._config[key] = r
            return r

    def _setConfiguration(self, key, value):
        # Store (or delete, when value is None) a persistent configuration
        # parameter, keeping the in-memory cache in sync.
        q = self.query
        e = self.escape
        self._config[key] = value
        k = e(str(key))
        if value is None:
            q("DELETE FROM config WHERE name = '%s'" % k)
            return
        value = str(value)
        sql = "REPLACE INTO config VALUES ('%s', '%s')" % (k, e(value))
        try:
            q(sql)
        except DataError, (code, _):
            # 'zodb' is the only key whose value may exceed 255 bytes:
            # widen the column on demand and retry.
            if code != DATA_TOO_LONG or len(value) < 256 or key != "zodb":
                raise
            q("ALTER TABLE config MODIFY value VARBINARY(%s) NULL" % len(value))
            q(sql)
    def getPartitionTable(self):
        # All (rid, nid, state) rows of the stored partition table.
        return self.query("SELECT * FROM pt")

    def getLastTID(self, max_tid):
        # Greatest committed tid <= max_tid, or None.
        # NOTE(review): 'max_tid' is interpolated as-is, so it must already
        # be an integer -- confirm against callers.
        return self.query("SELECT MAX(tid) FROM trans WHERE tid<=%s"
                          % max_tid)[0][0]
def _getLastIDs(self, all=True):
p64 = util.p64
q = self.query
trans = {partition: p64(tid)
for partition, tid in q("SELECT `partition`, MAX(tid)"
" FROM trans GROUP BY `partition`")}
obj = {partition: p64(tid)
for partition, tid in q("SELECT `partition`, MAX(tid)"
" FROM obj GROUP BY `partition`")}
oid = q("SELECT MAX(oid) FROM (SELECT MAX(oid) AS oid FROM obj"
" GROUP BY `partition`) as t")[0][0]
if all:
tid = q("SELECT MAX(tid) FROM ttrans")[0][0]
if tid is not None:
trans[None] = p64(tid)
tid, toid = q("SELECT MAX(tid), MAX(oid) FROM tobj")[0]
if tid is not None:
obj[None] = p64(tid)
if toid is not None and (oid < toid or oid is None):
oid = toid
return trans, obj, None if oid is None else p64(oid)
    def getUnfinishedTIDList(self):
        # All tids having uncommitted data (union of ttrans and tobj),
        # packed.
        p64 = util.p64
        return [p64(t[0]) for t in self.query("SELECT tid FROM ttrans"
                                              " UNION SELECT tid FROM tobj")]

    def objectPresent(self, oid, tid, all = True):
        # Truthy if given object revision exists; with 'all', the
        # uncommitted area (tobj) is searched as well.
        oid = util.u64(oid)
        tid = util.u64(tid)
        q = self.query
        return q("SELECT 1 FROM obj WHERE `partition`=%d AND oid=%d AND tid=%d"
                 % (self._getPartition(oid), oid, tid)) or all and \
               q("SELECT 1 FROM tobj WHERE tid=%d AND oid=%d" % (tid, oid))

    def getLastObjectTID(self, oid):
        # Packed tid of the most recent committed revision of oid, or None.
        oid = util.u64(oid)
        r = self.query("SELECT tid FROM obj"
                       " WHERE `partition`=%d AND oid=%d"
                       " ORDER BY tid DESC LIMIT 1"
                       % (self._getPartition(oid), oid))
        return util.p64(r[0][0]) if r else None

    def _getNextTID(self, *args): # partition, oid, tid
        # Unpacked tid of the revision of oid right after 'tid', or None.
        r = self.query("SELECT tid FROM obj"
                       " WHERE `partition`=%d AND oid=%d AND tid>%d"
                       " ORDER BY tid LIMIT 1" % args)
        return r[0][0] if r else None
    def _getObject(self, oid, tid=None, before_tid=None):
        """Return (serial, next_serial, compression, checksum, data,
        value_serial) for the requested revision of oid (unpacked values),
        or None if not found.

        'tid' selects an exact revision, 'before_tid' the last revision
        strictly before; with neither, the latest revision is returned.
        """
        q = self.query
        partition = self._getPartition(oid)
        sql = ('SELECT tid, compression, data.hash, value, value_tid'
               ' FROM obj LEFT JOIN data ON (obj.data_id = data.id)'
               ' WHERE `partition` = %d AND oid = %d') % (partition, oid)
        if before_tid is not None:
            sql += ' AND tid < %d ORDER BY tid DESC LIMIT 1' % before_tid
        elif tid is not None:
            sql += ' AND tid = %d' % tid
        else:
            # XXX I want to express "HAVING tid = MAX(tid)", but
            # MySQL does not use an index for a HAVING clause!
            sql += ' ORDER BY tid DESC LIMIT 1'
        r = q(sql)
        try:
            serial, compression, checksum, data, value_serial = r[0]
        except IndexError:
            return None
        if compression and compression & 0x80:
            # Bit 7 of 'compression' flags data stored in 'bigdata';
            # 'value' is then a (bigdata_id, length) header.
            compression &= 0x7f
            data = ''.join(self._bigData(data))
        return (serial, self._getNextTID(partition, oid, serial),
                compression, checksum, data, value_serial)
    def changePartitionTable(self, ptid, cell_list, reset=False):
        """Apply partition table changes: update or delete 'pt' rows for
        each (offset, nid, state) in cell_list, record the new ptid, and
        maintain native MySQL partitions when _use_partition is enabled."""
        offset_list = []
        q = self.query
        if reset:
            q("TRUNCATE pt")
        for offset, nid, state in cell_list:
            # TODO: this logic should move out of database manager
            # add 'dropCells(cell_list)' to API and use one query
            if state == CellStates.DISCARDED:
                q("DELETE FROM pt WHERE rid = %d AND nid = %d"
                  % (offset, nid))
            else:
                offset_list.append(offset)
                q("INSERT INTO pt VALUES (%d, %d, %d)"
                  " ON DUPLICATE KEY UPDATE state = %d"
                  % (offset, nid, state, state))
        self.setPTID(ptid)
        if self._use_partition:
            for offset in offset_list:
                add = """ALTER TABLE %%s ADD PARTITION (
                    PARTITION p%u VALUES IN (%u))""" % (offset, offset)
                for table in 'trans', 'obj':
                    try:
                        self.conn.query(add % table)
                    except OperationalError, (code, _):
                        if code != 1517: # duplicate partition name
                            raise

    def dropPartitions(self, offset_list):
        q = self.query
        # XXX: these queries are inefficient (execution time increase with
        # row count, although we use indexes) when there are rows to
        # delete. It should be done as an idle task, by chunks.
        for partition in offset_list:
            where = " WHERE `partition`=%d" % partition
            data_id_list = [x for x, in
                q("SELECT DISTINCT data_id FROM obj" + where) if x]
            if not self._use_partition:
                q("DELETE FROM obj" + where)
                q("DELETE FROM trans" + where)
            self._pruneData(data_id_list)
        if self._use_partition:
            # Dropping the native partition deletes its rows at once.
            drop = "ALTER TABLE %s DROP PARTITION" + \
                ','.join(' p%u' % i for i in offset_list)
            for table in 'trans', 'obj':
                try:
                    self.conn.query(drop % table)
                except OperationalError, (code, _):
                    if code != 1508: # already dropped
                        raise
def dropUnfinishedData(self):
q = self.query
data_id_list = [x for x, in q("SELECT data_id FROM tobj") if x]
q("TRUNCATE tobj")
q("TRUNCATE ttrans")
self.releaseData(data_id_list, True)
    def storeTransaction(self, tid, object_list, transaction, temporary = True):
        """Store a transaction (object metadata list plus optional
        transaction metadata), either in the temporary tables
        (temporary=True, committed immediately) or in the permanent ones."""
        e = self.escape
        u64 = util.u64
        tid = u64(tid)
        if temporary:
            obj_table = 'tobj'
            trans_table = 'ttrans'
        else:
            obj_table = 'obj'
            trans_table = 'trans'
        q = self.query
        for oid, data_id, value_serial in object_list:
            oid = u64(oid)
            partition = self._getPartition(oid)
            if value_serial:
                # Deduplicated record: reuse the data id of the record it
                # points at, taking a volatile reference when temporary.
                value_serial = u64(value_serial)
                (data_id,), = q("SELECT data_id FROM obj"
                    " WHERE `partition`=%d AND oid=%d AND tid=%d"
                    % (partition, oid, value_serial))
                if temporary:
                    self.holdData(data_id)
            else:
                value_serial = 'NULL'
            q("REPLACE INTO %s VALUES (%d, %d, %d, %s, %s)" % (obj_table,
                partition, oid, tid, data_id or 'NULL', value_serial))
        if transaction:
            oid_list, user, desc, ext, packed, ttid = transaction
            partition = self._getPartition(tid)
            assert packed in (0, 1)
            q("REPLACE INTO %s VALUES (%d,%d,%i,'%s','%s','%s','%s',%d)" % (
                trans_table, partition, tid, packed, e(''.join(oid_list)),
                e(user), e(desc), e(ext), u64(ttid)))
        if temporary:
            self.commit()
    # Big-endian pair of 32-bit unsigned ints: the (bigdata_id, length)
    # header stored in data.value when the real data lives in 'bigdata'.
    _structLL = struct.Struct(">LL")
    _unpackLL = _structLL.unpack

    def _pruneData(self, data_id_list):
        # Delete 'data' rows (and their 'bigdata' chunks) that are neither
        # referenced by a committed object nor held by a volatile reference
        # (self._uncommitted_data).
        data_id_list = set(data_id_list).difference(self._uncommitted_data)
        if data_id_list:
            q = self.query
            id_list = []
            bigid_list = []
            for id, value in q("SELECT id, IF(compression < 128, NULL, value)"
                               " FROM data LEFT JOIN obj ON (id = data_id)"
                               " WHERE id IN (%s) AND data_id IS NULL"
                               % ",".join(map(str, data_id_list))):
                id_list.append(str(id))
                if value:
                    # Big data: schedule every 8M chunk row for deletion.
                    bigdata_id, length = self._unpackLL(value)
                    bigid_list += xrange(bigdata_id,
                                         bigdata_id + (length + 0x7fffff >> 23))
            if id_list:
                q("DELETE FROM data WHERE id IN (%s)" % ",".join(id_list))
            if bigid_list:
                q("DELETE FROM bigdata WHERE id IN (%s)"
                  % ",".join(map(str, bigid_list)))

    def _bigData(self, value):
        # Yield the 8M chunks of a big data record, 'value' being the
        # (bigdata_id, length) header from data.value.
        bigdata_id, length = self._unpackLL(value)
        q = self.query
        return (q("SELECT value FROM bigdata WHERE id=%s" % i)[0][0]
                for i in xrange(bigdata_id,
                                bigdata_id + (length + 0x7fffff >> 23)))
    def storeData(self, checksum, data, compression, _pack=_structLL.pack):
        """Store object raw data, deduplicating on (hash, compression), and
        return the data id.

        Data of 16M or more is split into 8M 'bigdata' rows; only a
        (bigdata_id, length) header goes into 'data' and bit 7 of
        'compression' flags that layout.
        Raises IntegrityError(DUP_ENTRY) on a real hash collision.
        """
        e = self.escape
        checksum = e(checksum)
        if 0x1000000 <= len(data): # 16M (MEDIUMBLOB limit)
            compression |= 0x80
            q = self.query
            for r, d in q("SELECT id, value FROM data"
                          " WHERE hash='%s' AND compression=%s"
                          % (checksum, compression)):
                # Matching big data already stored: verify chunk by chunk
                # that it really is the same content.
                i = 0
                for d in self._bigData(d):
                    j = i + len(d)
                    if data[i:j] != d:
                        raise IntegrityError(DUP_ENTRY)
                    i = j
                if j != len(data):
                    raise IntegrityError(DUP_ENTRY)
                return r
            i = 'NULL'
            length = len(data)
            for j in xrange(0, length, 0x800000): # 8M
                q("INSERT INTO bigdata VALUES (%s, '%s')"
                  % (i, e(data[j:j+0x800000])))
                if not j:
                    # First chunk: remember the AUTO_INCREMENT base id.
                    i = bigdata_id = self.conn.insert_id()
                i += 1
            data = _pack(bigdata_id, length)
        try:
            self.query("INSERT INTO data VALUES (NULL, '%s', %d, '%s')" %
                (checksum, compression, e(data)))
        except IntegrityError, (code, _):
            if code == DUP_ENTRY:
                # Already stored: return the existing id if contents match,
                # otherwise it is a genuine collision.
                (r, d), = self.query("SELECT id, value FROM data"
                                     " WHERE hash='%s' AND compression=%s"
                                     % (checksum, compression))
                if d == data:
                    return r
            raise
        return self.conn.insert_id()
    del _structLL
    def _getDataTID(self, oid, tid=None, before_tid=None):
        # Native fast path of the generic _getDataTID: same contract, but
        # only (tid, value_tid) is fetched, no object data.
        sql = ('SELECT tid, value_tid FROM obj'
               ' WHERE `partition` = %d AND oid = %d'
              ) % (self._getPartition(oid), oid)
        if tid is not None:
            sql += ' AND tid = %d' % tid
        elif before_tid is not None:
            sql += ' AND tid < %d ORDER BY tid DESC LIMIT 1' % before_tid
        else:
            # XXX I want to express "HAVING tid = MAX(tid)", but
            # MySQL does not use an index for a HAVING clause!
            sql += ' ORDER BY tid DESC LIMIT 1'
        r = self.query(sql)
        return r[0] if r else (None, None)
    def finishTransaction(self, tid):
        """Move a transaction from the temporary tables (ttrans/tobj) to the
        permanent ones, turning its volatile data references into permanent
        ones, then commit."""
        q = self.query
        tid = util.u64(tid)
        sql = " FROM tobj WHERE tid=%d" % tid
        data_id_list = [x for x, in q("SELECT data_id" + sql) if x]
        q("INSERT INTO obj SELECT *" + sql)
        q("DELETE FROM tobj WHERE tid=%d" % tid)
        q("INSERT INTO trans SELECT * FROM ttrans WHERE tid=%d" % tid)
        q("DELETE FROM ttrans WHERE tid=%d" % tid)
        self.releaseData(data_id_list)
        self.commit()

    def deleteTransaction(self, tid, oid_list=()):
        """Delete a transaction from both temporary and permanent areas,
        along with the listed object records, pruning data that becomes
        unreferenced."""
        u64 = util.u64
        tid = u64(tid)
        getPartition = self._getPartition
        q = self.query
        sql = " FROM tobj WHERE tid=%d" % tid
        data_id_list = [x for x, in q("SELECT data_id" + sql) if x]
        self.releaseData(data_id_list)
        q("DELETE" + sql)
        q("""DELETE FROM ttrans WHERE tid = %d""" % tid)
        q("""DELETE FROM trans WHERE `partition` = %d AND tid = %d""" %
            (getPartition(tid), tid))
        # delete from obj using indexes
        data_id_set = set()
        for oid in oid_list:
            oid = u64(oid)
            sql = " FROM obj WHERE `partition`=%d AND oid=%d AND tid=%d" \
                % (getPartition(oid), oid, tid)
            data_id_set.update(*q("SELECT data_id" + sql))
            q("DELETE" + sql)
        data_id_set.discard(None)
        self._pruneData(data_id_set)
    def deleteObject(self, oid, serial=None):
        # Delete all records (or only revision 'serial') of given oid,
        # pruning data that becomes unreferenced.
        u64 = util.u64
        oid = u64(oid)
        sql = " FROM obj WHERE `partition`=%d AND oid=%d" \
            % (self._getPartition(oid), oid)
        if serial:
            sql += ' AND tid=%d' % u64(serial)
        q = self.query
        data_id_list = [x for x, in q("SELECT DISTINCT data_id" + sql) if x]
        q("DELETE" + sql)
        self._pruneData(data_id_list)

    def _deleteRange(self, partition, min_tid=None, max_tid=None):
        # Delete all objects and transactions of a partition in the range
        # ]min_tid, max_tid], pruning data that becomes unreferenced.
        sql = " WHERE `partition`=%d" % partition
        if min_tid:
            sql += " AND %d < tid" % util.u64(min_tid)
        if max_tid:
            sql += " AND tid <= %d" % util.u64(max_tid)
        q = self.query
        q("DELETE FROM trans" + sql)
        sql = " FROM obj" + sql
        data_id_list = [x for x, in q("SELECT DISTINCT data_id" + sql) if x]
        q("DELETE" + sql)
        self._pruneData(data_id_list)
    def getTransaction(self, tid, all = False):
        # Return (oid_list, user, desc, ext, packed, ttid) for given tid,
        # also searching 'ttrans' when 'all' is true; None if not found.
        tid = util.u64(tid)
        q = self.query
        r = q("SELECT oids, user, description, ext, packed, ttid"
              " FROM trans WHERE `partition` = %d AND tid = %d"
              % (self._getPartition(tid), tid))
        if not r and all:
            r = q("SELECT oids, user, description, ext, packed, ttid"
                  " FROM ttrans WHERE tid = %d" % tid)
        if r:
            oids, user, desc, ext, packed, ttid = r[0]
            oid_list = splitOIDField(tid, oids)
            return oid_list, user, desc, ext, bool(packed), util.p64(ttid)

    def getObjectHistory(self, oid, offset, length):
        # FIXME: This method doesn't take client's current transaction id as
        # parameter, which means it can return transactions in the future of
        # client's transaction.
        oid = util.u64(oid)
        p64 = util.p64
        # For big data (compression bit 7 set), the stored value is only a
        # header: bytes 5-8 hold the real length, big-endian.
        r = self.query("SELECT tid, IF(compression < 128, LENGTH(value),"
            " CAST(CONV(HEX(SUBSTR(value, 5, 4)), 16, 10) AS INT))"
            " FROM obj LEFT JOIN data ON (obj.data_id = data.id)"
            " WHERE `partition` = %d AND oid = %d AND tid >= %d"
            " ORDER BY tid DESC LIMIT %d, %d" %
            (self._getPartition(oid), oid, self._getPackTID(), offset, length))
        if r:
            # NOTE: 'length' here is the row value, shadowing the parameter.
            return [(p64(tid), length or 0) for tid, length in r]
    def getReplicationObjectList(self, min_tid, max_tid, length, partition,
            min_oid):
        # At most 'length' (serial, oid) pairs (packed) of given partition,
        # in ascending (tid, oid) order, from (min_tid, min_oid) inclusive
        # up to max_tid inclusive.
        u64 = util.u64
        p64 = util.p64
        min_tid = u64(min_tid)
        r = self.query('SELECT tid, oid FROM obj'
                       ' WHERE `partition` = %d AND tid <= %d'
                       ' AND (tid = %d AND %d <= oid OR %d < tid)'
                       ' ORDER BY tid ASC, oid ASC LIMIT %d' % (
            partition, u64(max_tid), min_tid, u64(min_oid), min_tid, length))
        return [(p64(serial), p64(oid)) for serial, oid in r]

    def getTIDList(self, offset, length, partition_list):
        # Page of committed tids (packed), most recent first, restricted to
        # the given partitions.
        q = self.query
        r = q("""SELECT tid FROM trans WHERE `partition` in (%s)
                 ORDER BY tid DESC LIMIT %d,%d""" \
              % (','.join(map(str, partition_list)), offset, length))
        return [util.p64(t[0]) for t in r]

    def getReplicationTIDList(self, min_tid, max_tid, length, partition):
        # At most 'length' committed tids (packed) of given partition in
        # [min_tid, max_tid], ascending.
        u64 = util.u64
        p64 = util.p64
        min_tid = u64(min_tid)
        max_tid = u64(max_tid)
        r = self.query("""SELECT tid FROM trans
                          WHERE `partition` = %(partition)d
                            AND tid >= %(min_tid)d AND tid <= %(max_tid)d
                          ORDER BY tid ASC LIMIT %(length)d""" % {
            'partition': partition,
            'min_tid': min_tid,
            'max_tid': max_tid,
            'length': length,
        })
        return [p64(t[0]) for t in r]
    def _updatePackFuture(self, oid, orig_serial, max_serial):
        q = self.query
        # Before deleting this objects revision, see if there is any
        # transaction referencing its value at max_serial or above.
        # If there is, copy value to the first future transaction. Any further
        # reference is just updated to point to the new data location.
        value_serial = None
        kw = {
            'partition': self._getPartition(oid),
            'oid': oid,
            'orig_tid': orig_serial,
            'max_tid': max_serial,
            'new_tid': 'NULL',
        }
        for kw['table'] in 'obj', 'tobj':
            for kw['tid'], in q('SELECT tid FROM %(table)s'
                  ' WHERE `partition`=%(partition)d AND oid=%(oid)d'
                  ' AND tid>=%(max_tid)d AND value_tid=%(orig_tid)d'
                  ' ORDER BY tid ASC' % kw):
                q('UPDATE %(table)s SET value_tid=%(new_tid)s'
                  ' WHERE `partition`=%(partition)d AND oid=%(oid)d'
                  ' AND tid=%(tid)d' % kw)
                if value_serial is None:
                    # First found, mark its serial for future reference.
                    kw['new_tid'] = value_serial = kw['tid']
        return value_serial

    def pack(self, tid, updateObjectDataForPack):
        """Delete all but the last revision (at or before 'tid') of every
        object, calling updateObjectDataForPack for each deleted record
        (see DatabaseManager.pack for the callback contract)."""
        # TODO: unit test (along with updatePackFuture)
        p64 = util.p64
        tid = util.u64(tid)
        updatePackFuture = self._updatePackFuture
        getPartition = self._getPartition
        q = self.query
        self._setPackTID(tid)
        for count, oid, max_serial in q("SELECT COUNT(*) - 1, oid, MAX(tid)"
                                        " FROM obj WHERE tid <= %d GROUP BY oid"
                                        % tid):
            partition = getPartition(oid)
            if q("SELECT 1 FROM obj WHERE `partition` = %d"
                 " AND oid = %d AND tid = %d AND data_id IS NULL"
                 % (partition, oid, max_serial)):
                # Last revision is a deletion record: delete it as well.
                max_serial += 1
            elif not count:
                continue
            # There are things to delete for this object
            data_id_set = set()
            sql = ' FROM obj WHERE `partition`=%d AND oid=%d' \
                ' AND tid<%d' % (partition, oid, max_serial)
            for serial, data_id in q('SELECT tid, data_id' + sql):
                data_id_set.add(data_id)
                new_serial = updatePackFuture(oid, serial, max_serial)
                if new_serial:
                    new_serial = p64(new_serial)
                updateObjectDataForPack(p64(oid), p64(serial),
                                        new_serial, data_id)
            q('DELETE' + sql)
            data_id_set.discard(None)
            self._pruneData(data_id_set)
        self.commit()
    def checkTIDRange(self, partition, length, min_tid, max_tid):
        # Digest of at most 'length' transactions of a partition in
        # [min_tid, max_tid]; see the generic checkTIDRange contract.
        count, tid_checksum, max_tid = self.query(
            """SELECT COUNT(*), SHA1(GROUP_CONCAT(tid SEPARATOR ",")), MAX(tid)
               FROM (SELECT tid FROM trans
                     WHERE `partition` = %(partition)s
                       AND tid >= %(min_tid)d
                       AND tid <= %(max_tid)d
                     ORDER BY tid ASC %(limit)s) AS t""" % {
            'partition': partition,
            'min_tid': util.u64(min_tid),
            'max_tid': util.u64(max_tid),
            'limit': '' if length is None else 'LIMIT %u' % length,
        })[0]
        if count:
            return count, a2b_hex(tid_checksum), util.p64(max_tid)
        return 0, ZERO_HASH, ZERO_TID

    def checkSerialRange(self, partition, length, min_tid, max_tid, min_oid):
        # Digest of at most 'length' object records of a partition starting
        # at (min_tid, min_oid); see the generic checkSerialRange contract.
        u64 = util.u64
        # We don't ask MySQL to compute everything (like in checkTIDRange)
        # because it's difficult to get the last serial _for the last oid_.
        # We would need a function (that could be named 'LAST') that returns the
        # last grouped value, instead of the greatest one.
        r = self.query(
            """SELECT tid, oid
               FROM obj
               WHERE `partition` = %(partition)s
                 AND tid <= %(max_tid)d
                 AND (tid > %(min_tid)d OR
                      tid = %(min_tid)d AND oid >= %(min_oid)d)
               ORDER BY tid, oid %(limit)s""" % {
            'min_oid': u64(min_oid),
            'min_tid': u64(min_tid),
            'max_tid': u64(max_tid),
            'limit': '' if length is None else 'LIMIT %u' % length,
            'partition': partition,
        })
        if r:
            p64 = util.p64
            return (len(r),
                    sha1(','.join(str(x[0]) for x in r)).digest(),
                    p64(r[-1][0]),
                    sha1(','.join(str(x[1]) for x in r)).digest(),
                    p64(r[-1][1]))
        return 0, ZERO_HASH, ZERO_TID, ZERO_HASH, ZERO_OID
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/database/sqlite.py 0000664 0000000 0000000 00000060376 12601037530 0026754 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2012-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import sqlite3
from hashlib import sha1
import string
import traceback
from . import DatabaseManager, LOG_QUERIES
from .manager import CreationUndone, splitOIDField
from neo.lib import logging, util
from neo.lib.exception import DatabaseFailure
from neo.lib.protocol import CellStates, ZERO_OID, ZERO_TID, ZERO_HASH
def unique_constraint_message(table, *columns):
    """Return the exact message sqlite3 raises for a UNIQUE constraint
    violation on the given table/columns, by reproducing one on a
    throw-away in-memory database.

    The result lets callers recognize this specific IntegrityError by
    comparing messages.
    """
    c = sqlite3.connect(":memory:")
    # One '?' placeholder per column; the string itself also serves as the
    # row to insert (a string is a sequence of 1-char values).
    values = '?' * len(columns)
    insert = "INSERT INTO %s VALUES(%s)" % (table, ', '.join(values))
    x = "%s (%s)" % (table, ', '.join(columns))
    c.execute("CREATE TABLE " + x)
    c.execute("CREATE UNIQUE INDEX i ON " + x)
    try:
        # Insert the same row twice to trigger the constraint.
        # BUGFIX: 'except E, e' is Python-2-only syntax; 'as' is valid on
        # Python 2.6+ and required on Python 3.
        c.executemany(insert, (values, values))
    except sqlite3.IntegrityError as e:
        return e.args[0]
    finally:
        # Don't leak the scratch connection.
        c.close()
    assert False
def retry_if_locked(f, *args):
    """Call f(*args), retrying for as long as SQLite reports that the
    database is locked.

    The first time the lock is hit, a warning with the current stack is
    logged. Any other OperationalError (initial or during retries) is
    propagated.
    """
    # BUGFIX: use the 'except E as e' form, valid on Python 2.6+ and
    # required on Python 3 (the old comma form was removed).
    try:
        return f(*args)
    except sqlite3.OperationalError as e:
        x = e.args[0]
        if x == 'database is locked':
            msg = traceback.format_exception_only(type(e), e)
            msg += traceback.format_stack()
            logging.warning(''.join(msg))
            while e.args[0] == x:
                try:
                    return f(*args)
                except sqlite3.OperationalError as retry_error:
                    # Rebind explicitly: Python 3 unbinds the 'as' name
                    # when the except block exits.
                    e = retry_error
        # Raise the most recent error explicitly: under Python 3 a bare
        # 'raise' here would re-raise the original exception, not the one
        # that broke the retry loop.
        raise e
class SQLiteDatabaseManager(DatabaseManager):
    """This class manages a database on SQLite.

    CAUTION: Make sure we never use statement journal files, as explained at
             http://www.sqlite.org/tempfiles.html for more information.
             In other words, temporary files (by default in /var/tmp !) must
             never be used for small requests.
    """

    def __init__(self, *args, **kw):
        super(SQLiteDatabaseManager, self).__init__(*args, **kw)
        self._config = {}       # cache of the 'config' table
        self._connect()
    def _parse(self, database):
        # For this backend, the 'database' setting is simply the path of
        # the SQLite file.
        self.db = database

    def close(self):
        # Close the underlying sqlite3 connection.
        self.conn.close()

    def _connect(self):
        logging.info('connecting to SQLite database %r', self.db)
        # check_same_thread=False: the connection may be used from a thread
        # other than the one that created it.
        self.conn = sqlite3.connect(self.db, check_same_thread=False)

    def commit(self):
        logging.debug('committing...')
        # Commit may fail transiently with 'database is locked' while
        # another connection holds the lock: retry until it succeeds.
        retry_if_locked(self.conn.commit)
    if LOG_QUERIES:
        def query(self, query):
            # Log a printable version of the first line of the query
            # (up to 70 characters, unprintable characters hex-escaped),
            # then execute it.
            printable_char_list = []
            for c in query.split('\n', 1)[0][:70]:
                if c not in string.printable or c in '\t\x0b\x0c\r':
                    c = '\\x%02x' % ord(c)
                printable_char_list.append(c)
            logging.debug('querying %s...', ''.join(printable_char_list))
            return self.conn.execute(query)
    else:
        # Fast path: expose execute directly when query logging is off.
        query = property(lambda self: self.conn.execute)

    def erase(self):
        # Drop every table managed by this backend.
        for t in 'config', 'pt', 'trans', 'obj', 'data', 'ttrans', 'tobj':
            self.query('DROP TABLE IF EXISTS ' + t)
    def _setup(self):
        """Create the schema if needed and load initial in-memory state.

        All DDL uses IF NOT EXISTS, so this is idempotent.  It also
        resets the configuration cache and rebuilds the reference counts
        of data rows held by uncommitted objects (tobj).
        """
        self._config.clear()
        q = self.query
        # The table "config" stores configuration parameters which affect the
        # persistent data.
        q("""CREATE TABLE IF NOT EXISTS config (
                 name TEXT NOT NULL PRIMARY KEY,
                 value TEXT)
          """)
        # The table "pt" stores a partition table.
        q("""CREATE TABLE IF NOT EXISTS pt (
                 rid INTEGER NOT NULL,
                 nid INTEGER NOT NULL,
                 state INTEGER NOT NULL,
                 PRIMARY KEY (rid, nid))
          """)
        # The table "trans" stores information on committed transactions.
        q("""CREATE TABLE IF NOT EXISTS trans (
                 partition INTEGER NOT NULL,
                 tid INTEGER NOT NULL,
                 packed BOOLEAN NOT NULL,
                 oids BLOB NOT NULL,
                 user BLOB NOT NULL,
                 description BLOB NOT NULL,
                 ext BLOB NOT NULL,
                 ttid INTEGER NOT NULL,
                 PRIMARY KEY (partition, tid))
          """)
        # The table "obj" stores committed object metadata.
        q("""CREATE TABLE IF NOT EXISTS obj (
                 partition INTEGER NOT NULL,
                 oid INTEGER NOT NULL,
                 tid INTEGER NOT NULL,
                 data_id INTEGER,
                 value_tid INTEGER,
                 PRIMARY KEY (partition, tid, oid))
          """)
        q("""CREATE INDEX IF NOT EXISTS _obj_i1 ON
                 obj(partition, oid, tid)
          """)
        q("""CREATE INDEX IF NOT EXISTS _obj_i2 ON
                 obj(data_id)
          """)
        # The table "data" stores object data.
        q("""CREATE TABLE IF NOT EXISTS data (
                 id INTEGER PRIMARY KEY AUTOINCREMENT,
                 hash BLOB NOT NULL,
                 compression INTEGER NOT NULL,
                 value BLOB NOT NULL)
          """)
        q("""CREATE UNIQUE INDEX IF NOT EXISTS _data_i1 ON
                 data(hash, compression)
          """)
        # The table "ttrans" stores information on uncommitted transactions.
        q("""CREATE TABLE IF NOT EXISTS ttrans (
                 partition INTEGER NOT NULL,
                 tid INTEGER NOT NULL,
                 packed BOOLEAN NOT NULL,
                 oids BLOB NOT NULL,
                 user BLOB NOT NULL,
                 description BLOB NOT NULL,
                 ext BLOB NOT NULL,
                 ttid INTEGER NOT NULL)
          """)
        # The table "tobj" stores uncommitted object metadata.
        q("""CREATE TABLE IF NOT EXISTS tobj (
                 partition INTEGER NOT NULL,
                 oid INTEGER NOT NULL,
                 tid INTEGER NOT NULL,
                 data_id INTEGER,
                 value_tid INTEGER,
                 PRIMARY KEY (tid, oid))
          """)
        # Rebuild the refcounts of data rows referenced by uncommitted
        # objects, so that _pruneData() keeps them alive.
        self._uncommitted_data.update(q("SELECT data_id, count(*)"
            " FROM tobj WHERE data_id IS NOT NULL GROUP BY data_id"))
    def getConfiguration(self, key):
        """Return the value of a configuration parameter, or None if it
        is not set.  Results (including misses) are memoized in
        self._config."""
        try:
            return self._config[key]
        except KeyError:
            try:
                r = self.query("SELECT value FROM config WHERE name=?",
                               (key,)).fetchone()[0]
            except TypeError:  # fetchone() returned None: key absent
                r = None
            self._config[key] = r
            return r
def _setConfiguration(self, key, value):
q = self.query
self._config[key] = value
if value is None:
q("DELETE FROM config WHERE name=?", (key,))
else:
q("REPLACE INTO config VALUES (?,?)", (key, str(value)))
    def getPartitionTable(self):
        """Iterate over all (rid, nid, state) rows of the partition table."""
        return self.query("SELECT * FROM pt")
    def getLastTID(self, max_tid):
        """Return the last committed tid not greater than max_tid
        (unpacked integers), or None if there is none."""
        return self.query("SELECT MAX(tid) FROM trans WHERE tid<=?",
                          (max_tid,)).next()[0]
def _getLastIDs(self, all=True):
p64 = util.p64
q = self.query
trans = {partition: p64(tid)
for partition, tid in q("SELECT partition, MAX(tid)"
" FROM trans GROUP BY partition")}
obj = {partition: p64(tid)
for partition, tid in q("SELECT partition, MAX(tid)"
" FROM obj GROUP BY partition")}
oid = q("SELECT MAX(oid) FROM (SELECT MAX(oid) AS oid FROM obj"
" GROUP BY partition) as t").next()[0]
if all:
tid = q("SELECT MAX(tid) FROM ttrans").next()[0]
if tid is not None:
trans[None] = p64(tid)
tid, toid = q("SELECT MAX(tid), MAX(oid) FROM tobj").next()
if tid is not None:
obj[None] = p64(tid)
if toid is not None and (oid < toid or oid is None):
oid = toid
return trans, obj, None if oid is None else p64(oid)
def getUnfinishedTIDList(self):
p64 = util.p64
return [p64(t[0]) for t in self.query("SELECT tid FROM ttrans"
" UNION SELECT tid FROM tobj")]
    def objectPresent(self, oid, tid, all=True):
        """Tell whether the given object revision exists in 'obj', also
        checking the uncommitted 'tobj' table when all is true.

        Returns a truthy row or a falsy value, not a strict boolean.
        """
        oid = util.u64(oid)
        tid = util.u64(tid)
        q = self.query
        return q("SELECT 1 FROM obj WHERE partition=? AND oid=? AND tid=?",
                 (self._getPartition(oid), oid, tid)).fetchone() or all and \
               q("SELECT 1 FROM tobj WHERE tid=? AND oid=?",
                 (tid, oid)).fetchone()
def getLastObjectTID(self, oid):
oid = util.u64(oid)
r = self.query("SELECT tid FROM obj"
" WHERE partition=? AND oid=?"
" ORDER BY tid DESC LIMIT 1",
(self._getPartition(oid), oid)).fetchone()
return r and util.p64(r[0])
    def _getNextTID(self, *args): # partition, oid, tid
        """Return the tid of the revision that immediately follows the
        given one for the same object, or None if it is the last."""
        r = self.query("""SELECT tid FROM obj
                          WHERE partition=? AND oid=? AND tid>?
                          ORDER BY tid LIMIT 1""", args).fetchone()
        return r and r[0]
def _getObject(self, oid, tid=None, before_tid=None):
q = self.query
partition = self._getPartition(oid)
sql = ('SELECT tid, compression, data.hash, value, value_tid'
' FROM obj LEFT JOIN data ON obj.data_id = data.id'
' WHERE partition=? AND oid=?')
if tid is not None:
r = q(sql + ' AND tid=?', (partition, oid, tid))
elif before_tid is not None:
r = q(sql + ' AND tid ORDER BY tid DESC LIMIT 1',
(partition, oid, before_tid))
else:
r = q(sql + ' ORDER BY tid DESC LIMIT 1', (partition, oid))
try:
serial, compression, checksum, data, value_serial = r.fetchone()
except TypeError:
return None
if checksum:
checksum = str(checksum)
data = str(data)
return (serial, self._getNextTID(partition, oid, serial),
compression, checksum, data, value_serial)
    def changePartitionTable(self, ptid, cell_list, reset=False):
        """Apply partition table changes and record the new ptid.

        Each cell is (offset, nid, state); DISCARDED cells are removed,
        others are (re)inserted.  With reset=True, the whole table is
        emptied first.
        """
        q = self.query
        if reset:
            q("DELETE FROM pt")
        for offset, nid, state in cell_list:
            # TODO: this logic should move out of database manager
            #       add 'dropCells(cell_list)' to API and use one query
            # WKRD: Why does SQLite need a statement journal file
            #       whereas we try to replace only 1 value ?
            #       We don't want to remove the 'NOT NULL' constraint
            #       so we must simulate a "REPLACE OR FAIL".
            q("DELETE FROM pt WHERE rid=? AND nid=?", (offset, nid))
            if state != CellStates.DISCARDED:
                q("INSERT OR FAIL INTO pt VALUES (?,?,?)",
                  (offset, nid, int(state)))
        self.setPTID(ptid)
    def dropPartitions(self, offset_list):
        """Delete all transactions and object revisions belonging to the
        given partitions, then prune data rows that became
        unreferenced."""
        where = " WHERE partition=?"
        q = self.query
        for partition in offset_list:
            args = partition,
            data_id_list = [x for x, in
                q("SELECT DISTINCT data_id FROM obj" + where, args) if x]
            q("DELETE FROM obj" + where, args)
            q("DELETE FROM trans" + where, args)
            self._pruneData(data_id_list)
def dropUnfinishedData(self):
q = self.query
data_id_list = [x for x, in q("SELECT data_id FROM tobj") if x]
q("DELETE FROM tobj")
q("DELETE FROM ttrans")
self.releaseData(data_id_list, True)
    def storeTransaction(self, tid, object_list, transaction, temporary=True):
        """Store a transaction and its objects, in the temporary tables
        (ttrans/tobj, the default) or directly in the final ones.

        object_list items are (oid, data_id, value_serial); when
        value_serial is set, the stored data_id is copied from the
        revision it points at (deduplication for undo records).
        """
        u64 = util.u64
        tid = u64(tid)
        # Table-name prefix: 't' selects the temporary tables.
        T = 't' if temporary else ''
        obj_sql = "INSERT OR FAIL INTO %sobj VALUES (?,?,?,?,?)" % T
        q = self.query
        for oid, data_id, value_serial in object_list:
            oid = u64(oid)
            partition = self._getPartition(oid)
            if value_serial:
                value_serial = u64(value_serial)
                (data_id,), = q("SELECT data_id FROM obj"
                    " WHERE partition=? AND oid=? AND tid=?",
                    (partition, oid, value_serial))
                if temporary:
                    self.holdData(data_id)
            try:
                q(obj_sql, (partition, oid, tid, data_id, value_serial))
            except sqlite3.IntegrityError:
                # This may happen if a previous replication of 'obj' was
                # interrupted.
                if not T:
                    r, = q("SELECT data_id, value_tid FROM obj"
                           " WHERE partition=? AND oid=? AND tid=?",
                           (partition, oid, tid))
                    if r == (data_id, value_serial):
                        continue
                raise
        if transaction:
            oid_list, user, desc, ext, packed, ttid = transaction
            partition = self._getPartition(tid)
            assert packed in (0, 1)
            q("INSERT OR FAIL INTO %strans VALUES (?,?,?,?,?,?,?,?)" % T,
              (partition, tid, packed, buffer(''.join(oid_list)),
               buffer(user), buffer(desc), buffer(ext), u64(ttid)))
        if temporary:
            self.commit()
    def _pruneData(self, data_id_list):
        """Delete the given data rows, keeping those still referenced by
        committed objects or held by uncommitted ones."""
        data_id_list = set(data_id_list).difference(self._uncommitted_data)
        if data_id_list:
            q = self.query
            # Keep rows still referenced from 'obj'.
            data_id_list.difference_update(x for x, in q(
                "SELECT DISTINCT data_id FROM obj WHERE data_id IN (%s)"
                % ",".join(map(str, data_id_list))))
            q("DELETE FROM data WHERE id IN (%s)"
              % ",".join(map(str, data_id_list)))
def storeData(self, checksum, data, compression,
_dup=unique_constraint_message("data", "hash", "compression")):
H = buffer(checksum)
try:
return self.query("INSERT INTO data VALUES (NULL,?,?,?)",
(H, compression, buffer(data))).lastrowid
except sqlite3.IntegrityError, e:
if e.args[0] == _dup:
(r, d), = self.query("SELECT id, value FROM data"
" WHERE hash=? AND compression=?",
(H, compression))
if str(d) == data:
return r
raise
def _getDataTID(self, oid, tid=None, before_tid=None):
partition = self._getPartition(oid)
sql = 'SELECT tid, value_tid FROM obj' \
' WHERE partition=? AND oid=?'
if tid is not None:
r = self.query(sql + ' AND tid=?', (partition, oid, tid))
elif before_tid is not None:
r = self.query(sql + ' AND tid ORDER BY tid DESC LIMIT 1',
(partition, oid, before_tid))
else:
r = self.query(sql + ' ORDER BY tid DESC LIMIT 1',
(partition, oid))
r = r.fetchone()
return r or (None, None)
    def finishTransaction(self, tid):
        """Move a transaction from the temporary tables (ttrans/tobj) to
        the final ones, release the data rows it was holding, and
        commit."""
        args = util.u64(tid),
        q = self.query
        sql = " FROM tobj WHERE tid=?"
        data_id_list = [x for x, in q("SELECT data_id" + sql, args) if x]
        q("INSERT OR FAIL INTO obj SELECT *" + sql, args)
        q("DELETE FROM tobj WHERE tid=?", args)
        q("INSERT OR FAIL INTO trans SELECT * FROM ttrans WHERE tid=?", args)
        q("DELETE FROM ttrans WHERE tid=?", args)
        self.releaseData(data_id_list)
        self.commit()
    def deleteTransaction(self, tid, oid_list=()):
        """Forget a transaction: drop it from the uncommitted tables and
        from 'trans', and delete the given committed object revisions,
        pruning data rows that become unreferenced."""
        u64 = util.u64
        tid = u64(tid)
        getPartition = self._getPartition
        q = self.query
        sql = " FROM tobj WHERE tid=?"
        data_id_list = [x for x, in q("SELECT data_id" + sql, (tid,)) if x]
        self.releaseData(data_id_list)
        q("DELETE" + sql, (tid,))
        q("DELETE FROM ttrans WHERE tid=?", (tid,))
        q("DELETE FROM trans WHERE partition=? AND tid=?",
            (getPartition(tid), tid))
        # delete from obj using indexes
        data_id_set = set()
        for oid in oid_list:
            oid = u64(oid)
            sql = " FROM obj WHERE partition=? AND oid=? AND tid=?"
            args = getPartition(oid), oid, tid
            data_id_set.update(*q("SELECT data_id" + sql, args))
            q("DELETE" + sql, args)
        data_id_set.discard(None)
        self._pruneData(data_id_set)
def deleteObject(self, oid, serial=None):
oid = util.u64(oid)
sql = " FROM obj WHERE partition=? AND oid=?"
args = [self._getPartition(oid), oid]
if serial:
sql += " AND tid=?"
args.append(util.u64(serial))
q = self.query
data_id_list = [x for x, in q("SELECT DISTINCT data_id" + sql, args)
if x]
q("DELETE" + sql, args)
self._pruneData(data_id_list)
    def _deleteRange(self, partition, min_tid=None, max_tid=None):
        """Delete transactions and object revisions of a partition whose
        tid falls in (min_tid, max_tid], pruning unreferenced data
        rows.  Unset bounds are open."""
        sql = " WHERE partition=?"
        args = [partition]
        if min_tid:
            sql += " AND ? < tid"
            args.append(util.u64(min_tid))
        if max_tid:
            sql += " AND tid <= ?"
            args.append(util.u64(max_tid))
        q = self.query
        q("DELETE FROM trans" + sql, args)
        sql = " FROM obj" + sql
        data_id_list = [x for x, in q("SELECT DISTINCT data_id" + sql, args)
                        if x]
        q("DELETE" + sql, args)
        self._pruneData(data_id_list)
    def getTransaction(self, tid, all=False):
        """Return (oid_list, user, description, ext, packed, ttid) for
        the given packed tid, also looking at uncommitted transactions
        when all is true; None if not found."""
        tid = util.u64(tid)
        q = self.query
        r = q("SELECT oids, user, description, ext, packed, ttid"
              " FROM trans WHERE partition=? AND tid=?",
              (self._getPartition(tid), tid)).fetchone()
        if not r and all:
            r = q("SELECT oids, user, description, ext, packed, ttid"
                  " FROM ttrans WHERE tid=?", (tid,)).fetchone()
        if r:
            oids, user, description, ext, packed, ttid = r
            return splitOIDField(tid, oids), str(user), \
                str(description), str(ext), packed, util.p64(ttid)
    def getObjectHistory(self, oid, offset, length):
        """Return [(packed tid, data length)] for revisions of 'oid'
        at or after the pack point, most recent first, or None if the
        slice is empty.  Revisions without data are reported with
        length 0."""
        # FIXME: This method doesn't take client's current transaction id as
        #        parameter, which means it can return transactions in the
        #        future of client's transaction.
        p64 = util.p64
        oid = util.u64(oid)
        return [(p64(tid), length or 0) for tid, length in self.query("""\
            SELECT tid, LENGTH(value)
            FROM obj LEFT JOIN data ON obj.data_id = data.id
            WHERE partition=? AND oid=? AND tid>=?
            ORDER BY tid DESC LIMIT ?,?""",
            (self._getPartition(oid), oid, self._getPackTID(), offset, length))
            ] or None
def getReplicationObjectList(self, min_tid, max_tid, length, partition,
min_oid):
u64 = util.u64
p64 = util.p64
min_tid = u64(min_tid)
return [(p64(serial), p64(oid)) for serial, oid in self.query("""\
SELECT tid, oid FROM obj
WHERE partition=? AND tid<=?
AND (tid=? AND ?<=oid OR ?=? AND value_tid=?
ORDER BY tid ASC""" % T,
(partition, oid, max_serial, orig_serial)):
q(update, (value_serial, partition, oid, serial))
if value_serial is None:
# First found, mark its serial for future reference.
value_serial = serial
return value_serial
def pack(self, tid, updateObjectDataForPack):
# TODO: unit test (along with updatePackFuture)
p64 = util.p64
tid = util.u64(tid)
updatePackFuture = self._updatePackFuture
getPartition = self._getPartition
q = self.query
self._setPackTID(tid)
for count, oid, max_serial in q("SELECT COUNT(*) - 1, oid, MAX(tid)"
" FROM obj WHERE tid<=? GROUP BY oid",
(tid,)):
partition = getPartition(oid)
if q("SELECT 1 FROM obj WHERE partition=?"
" AND oid=? AND tid=? AND data_id IS NULL",
(partition, oid, max_serial)).fetchone():
max_serial += 1
elif not count:
continue
# There are things to delete for this object
data_id_set = set()
sql = " FROM obj WHERE partition=? AND oid=? AND tid"
args = partition, oid, max_serial
for serial, data_id in q("SELECT tid, data_id" + sql, args):
data_id_set.add(data_id)
new_serial = updatePackFuture(oid, serial, max_serial)
if new_serial:
new_serial = p64(new_serial)
updateObjectDataForPack(p64(oid), p64(serial),
new_serial, data_id)
q("DELETE" + sql, args)
data_id_set.discard(None)
self._pruneData(data_id_set)
self.commit()
    def checkTIDRange(self, partition, length, min_tid, max_tid):
        """Return (count, sha1 of the concatenated tids, packed max tid)
        over at most 'length' transactions of a partition within
        [min_tid, max_tid], for replication integrity checks.  Returns
        zeros when the range is empty."""
        # XXX: SQLite's GROUP_CONCAT is slow (looks like quadratic)
        count, tids, max_tid = self.query("""\
            SELECT COUNT(*), GROUP_CONCAT(tid), MAX(tid)
            FROM (SELECT tid FROM trans
                  WHERE partition=? AND ?<=tid AND tid<=?
                  ORDER BY tid ASC LIMIT ?) AS t""",
            (partition, util.u64(min_tid), util.u64(max_tid),
             -1 if length is None else length)).fetchone()
        if count:
            return count, sha1(tids).digest(), util.p64(max_tid)
        return 0, ZERO_HASH, ZERO_TID
    def checkSerialRange(self, partition, length, min_tid, max_tid, min_oid):
        """Return (count, sha1 of serials, packed max serial, sha1 of
        oids, packed max oid) over at most 'length' object records of a
        partition starting at (min_tid, min_oid), for replication
        integrity checks.  Returns zeros when the range is empty."""
        u64 = util.u64
        # We don't ask SQLite to compute everything (like in checkTIDRange)
        # because it's difficult to get the last serial _for the last oid_.
        # We would need a function (that could be named 'LAST') that returns
        # the last grouped value, instead of the greatest one.
        min_tid = u64(min_tid)
        r = self.query("""\
            SELECT tid, oid
            FROM obj
            WHERE partition=? AND tid<=? AND (tid>? OR tid=? AND oid>=?)
            ORDER BY tid, oid LIMIT ?""",
            (partition, u64(max_tid), min_tid, min_tid, u64(min_oid),
             -1 if length is None else length)).fetchall()
        if r:
            p64 = util.p64
            return (len(r),
                    sha1(','.join(str(x[0]) for x in r)).digest(),
                    p64(r[-1][0]),
                    sha1(','.join(str(x[1]) for x in r)).digest(),
                    p64(r[-1][1]))
        return 0, ZERO_HASH, ZERO_TID, ZERO_HASH, ZERO_OID
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/exception.py 0000664 0000000 0000000 00000001324 12601037530 0025671 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2010-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
class AlreadyPendingError(Exception):
    """Raised when queueing an event that is already pending for the
    same key and duplicates are disallowed (see the raise_on_duplicate
    argument of the application's queueEvent)."""
    pass
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/ 0000775 0000000 0000000 00000000000 12601037530 0025121 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/__init__.py 0000664 0000000 0000000 00000004643 12601037530 0027241 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.handler import EventHandler
from neo.lib.exception import PrimaryFailure, OperationFailure
from neo.lib.protocol import uuid_str, NodeStates, NodeTypes
class BaseMasterHandler(EventHandler):
    """Base handler for the connection to the primary master node."""

    def connectionLost(self, conn, new_state):
        if self.app.listening_conn: # if running
            self.app.master_node = None
            raise PrimaryFailure('connection lost')

    def stopOperation(self, conn):
        raise OperationFailure('operation stopped')

    def reelectPrimary(self, conn):
        raise PrimaryFailure('re-election occurs')

    def notifyClusterInformation(self, conn, state):
        self.app.changeClusterState(state)

    def notifyNodeInformation(self, conn, node_list):
        """Store information on nodes, only if this is sent by a primary
        master node."""
        self.app.nm.update(node_list)
        for node_type, addr, uuid, state in node_list:
            if uuid == self.app.uuid:
                # This is me, do what the master tell me
                logging.info("I was told I'm %s", state)
                if state in (NodeStates.DOWN, NodeStates.TEMPORARILY_DOWN,
                        NodeStates.BROKEN, NodeStates.UNKNOWN):
                    # Shut down; data is erased only on an explicit DOWN.
                    erase = state == NodeStates.DOWN
                    self.app.shutdown(erase=erase)
                elif state == NodeStates.HIDDEN:
                    raise OperationFailure
            elif node_type == NodeTypes.CLIENT and state != NodeStates.RUNNING:
                # A former client node must not hold locks anymore.
                logging.info('Notified of non-running client, abort (%s)',
                        uuid_str(uuid))
                self.app.tm.abortFor(uuid)

    def answerUnfinishedTransactions(self, conn, *args, **kw):
        self.app.replicator.setUnfinishedTIDList(*args, **kw)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/client.py 0000664 0000000 0000000 00000022151 12601037530 0026752 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.handler import EventHandler
from neo.lib.util import dump, makeChecksum
from neo.lib.protocol import Packets, LockState, Errors, ProtocolError, \
ZERO_HASH, INVALID_PARTITION
from ..transactions import ConflictError, DelayedError
from ..exception import AlreadyPendingError
import time
# Log stores taking (incl. lock delays) more than this many seconds.
# Set to None to disable.
SLOW_STORE = 2

class ClientOperationHandler(EventHandler):
    """Handler for connections from client nodes while the cluster is
    operational."""

    def askTransactionInformation(self, conn, tid):
        t = self.app.dm.getTransaction(tid)
        if t is None:
            p = Errors.TidNotFound('%s does not exist' % dump(tid))
        else:
            p = Packets.AnswerTransactionInformation(tid, t[1], t[2], t[3],
                    t[4], t[0])
        conn.answer(p)

    def askObject(self, conn, oid, serial, tid):
        """Answer with one object revision, delaying the answer while
        the object is write-locked."""
        app = self.app
        if app.tm.loadLocked(oid):
            # Delay the response.
            app.queueEvent(self.askObject, conn, (oid, serial, tid))
            return
        o = app.dm.getObject(oid, serial, tid)
        try:
            serial, next_serial, compression, checksum, data, data_serial = o
        except TypeError:  # o is None or a short error marker
            p = (Errors.OidDoesNotExist if o is None else
                 Errors.OidNotFound)(dump(oid))
        else:
            if checksum is None:
                checksum = ZERO_HASH
                data = ''
            p = Packets.AnswerObject(oid, serial, next_serial,
                compression, checksum, data, data_serial)
        conn.answer(p)

    def connectionLost(self, conn, new_state):
        uuid = conn.getUUID()
        node = self.app.nm.getByUUID(uuid)
        if self.app.listening_conn: # if running
            assert node is not None, conn
            self.app.nm.remove(node)

    def abortTransaction(self, conn, ttid):
        self.app.tm.abort(ttid)

    def askStoreTransaction(self, conn, ttid, user, desc, ext, oid_list):
        self.app.tm.register(conn.getUUID(), ttid)
        self.app.tm.storeTransaction(ttid, oid_list, user, desc, ext, False)
        conn.answer(Packets.AnswerStoreTransaction(ttid))

    def _askStoreObject(self, conn, oid, serial, compression, checksum, data,
            data_serial, ttid, unlock, request_time):
        """Actually store an object, answering conflict (1) or success
        (0); requeues itself while the object is locked elsewhere."""
        if ttid not in self.app.tm:
            # transaction was aborted, cancel this event
            logging.info('Forget store of %s:%s by %s delayed by %s',
                    dump(oid), dump(serial), dump(ttid),
                    dump(self.app.tm.getLockingTID(oid)))
            # send an answer as the client side is waiting for it
            conn.answer(Packets.AnswerStoreObject(0, oid, serial))
            return
        try:
            self.app.tm.storeObject(ttid, serial, oid, compression,
                    checksum, data, data_serial, unlock)
        except ConflictError, err:
            # resolvable or not
            conn.answer(Packets.AnswerStoreObject(1, oid, err.getTID()))
        except DelayedError:
            # locked by a previous transaction, retry later
            # If we are unlocking, we want queueEvent to raise
            # AlreadyPendingError, to avoid making client wait for an
            # unneeded response.
            try:
                self.app.queueEvent(self._askStoreObject, conn, (oid, serial,
                    compression, checksum, data, data_serial, ttid,
                    unlock, request_time), key=(oid, ttid),
                    raise_on_duplicate=unlock)
            except AlreadyPendingError:
                conn.answer(Errors.AlreadyPending(dump(oid)))
        else:
            if SLOW_STORE is not None:
                duration = time.time() - request_time
                if duration > SLOW_STORE:
                    logging.info('StoreObject delay: %.02fs', duration)
            conn.answer(Packets.AnswerStoreObject(0, oid, serial))

    def askStoreObject(self, conn, oid, serial,
            compression, checksum, data, data_serial, ttid, unlock):
        """Validate a store request and delegate to _askStoreObject."""
        if 1 < compression:
            raise ProtocolError('invalid compression value')
        # register the transaction
        self.app.tm.register(conn.getUUID(), ttid)
        if data or checksum != ZERO_HASH:
            # TODO: return an appropriate error packet
            assert makeChecksum(data) == checksum
            assert data_serial is None
        else:
            checksum = data = None
        self._askStoreObject(conn, oid, serial, compression, checksum, data,
            data_serial, ttid, unlock, time.time())

    def askTIDsFrom(self, conn, min_tid, max_tid, length, partition):
        conn.answer(Packets.AnswerTIDsFrom(self.app.dm.getReplicationTIDList(
            min_tid, max_tid, length, partition)))

    def askTIDs(self, conn, first, last, partition):
        # This method is complicated, because I must return TIDs only
        # about usable partitions assigned to me.
        if first >= last:
            raise ProtocolError('invalid offsets')
        app = self.app
        if partition == INVALID_PARTITION:
            partition_list = app.pt.getAssignedPartitionList(app.uuid)
        else:
            partition_list = [partition]
        tid_list = app.dm.getTIDList(first, last - first, partition_list)
        conn.answer(Packets.AnswerTIDs(tid_list))

    def askObjectUndoSerial(self, conn, ttid, ltid, undone_tid, oid_list):
        """Answer, for each oid, the serials needed to undo a
        transaction, or an OidNotFound error."""
        app = self.app
        findUndoTID = app.dm.findUndoTID
        getObjectFromTransaction = app.tm.getObjectFromTransaction
        object_tid_dict = {}
        for oid in oid_list:
            current_serial, undo_serial, is_current = findUndoTID(oid, ttid,
                ltid, undone_tid, getObjectFromTransaction(ttid, oid))
            if current_serial is None:
                p = Errors.OidNotFound(dump(oid))
                break
            object_tid_dict[oid] = (current_serial, undo_serial, is_current)
        else:
            p = Packets.AnswerObjectUndoSerial(object_tid_dict)
        conn.answer(p)

    def askHasLock(self, conn, ttid, oid):
        """Tell whether 'oid' is unlocked, locked by 'ttid', or locked
        by another transaction."""
        locking_tid = self.app.tm.getLockingTID(oid)
        logging.info('%r check lock of %r:%r', conn, dump(ttid), dump(oid))
        if locking_tid is None:
            state = LockState.NOT_LOCKED
        elif locking_tid is ttid:
            state = LockState.GRANTED
        else:
            state = LockState.GRANTED_TO_OTHER
        conn.answer(Packets.AnswerHasLock(oid, state))

    def askObjectHistory(self, conn, oid, first, last):
        if first >= last:
            raise ProtocolError('invalid offsets')
        app = self.app
        if app.tm.loadLocked(oid):
            # Delay the response.
            app.queueEvent(self.askObjectHistory, conn, (oid, first, last))
            return
        history_list = app.dm.getObjectHistory(oid, first, last - first)
        if history_list is None:
            p = Errors.OidNotFound(dump(oid))
        else:
            p = Packets.AnswerObjectHistory(oid, history_list)
        conn.answer(p)

    def askCheckCurrentSerial(self, conn, ttid, serial, oid):
        self.app.tm.register(conn.getUUID(), ttid)
        self._askCheckCurrentSerial(conn, ttid, serial, oid, time.time())

    def _askCheckCurrentSerial(self, conn, ttid, serial, oid, request_time):
        """Check that 'serial' is still current for 'oid', answering
        conflict (1) or success (0); requeues itself while locked."""
        if ttid not in self.app.tm:
            # transaction was aborted, cancel this event
            logging.info('Forget serial check of %s:%s by %s delayed by %s',
                    dump(oid), dump(serial), dump(ttid),
                    dump(self.app.tm.getLockingTID(oid)))
            # send an answer as the client side is waiting for it
            conn.answer(Packets.AnswerCheckCurrentSerial(0, oid, serial))
            return
        try:
            self.app.tm.checkCurrentSerial(ttid, serial, oid)
        except ConflictError, err:
            # resolvable or not
            conn.answer(Packets.AnswerCheckCurrentSerial(1, oid,
                err.getTID()))
        except DelayedError:
            # locked by a previous transaction, retry later
            try:
                self.app.queueEvent(self._askCheckCurrentSerial, conn, (ttid,
                    serial, oid, request_time), key=(oid, ttid))
            except AlreadyPendingError:
                conn.answer(Errors.AlreadyPending(dump(oid)))
        else:
            if SLOW_STORE is not None:
                duration = time.time() - request_time
                if duration > SLOW_STORE:
                    logging.info('CheckCurrentSerial delay: %.02fs', duration)
            conn.answer(Packets.AnswerCheckCurrentSerial(0, oid, serial))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/hidden.py 0000664 0000000 0000000 00000003620 12601037530 0026727 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from . import BaseMasterHandler
from neo.lib import logging
from neo.lib.protocol import CellStates
class HiddenHandler(BaseMasterHandler):
    """This class implements a generic part of the event handlers."""

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        """This is very similar to Send Partition Table, except that
        the information is only about changes from the previous."""
        app = self.app
        if ptid <= app.pt.getID():
            # Ignore this packet.
            logging.debug('ignoring older partition changes')
            return
        # update partition table in memory and the database
        app.pt.update(ptid, cell_list, app.nm)
        app.dm.changePartitionTable(ptid, cell_list)
        # Check changes for replications
        for offset, uuid, state in cell_list:
            if uuid == app.uuid and app.replicator is not None:
                # If this is for myself, this can affect replications.
                if state == CellStates.DISCARDED:
                    app.replicator.removePartition(offset)
                elif state == CellStates.OUT_OF_DATE:
                    app.replicator.addPartition(offset)

    def startOperation(self, conn):
        # The master allowed the cluster to start operating.
        self.app.operational = True
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/identification.py 0000664 0000000 0000000 00000007031 12601037530 0030465 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.handler import EventHandler
from neo.lib.protocol import uuid_str, NodeTypes, NotReadyError, Packets
from neo.lib.protocol import ProtocolError, BrokenNodeDisallowedError
from .storage import StorageOperationHandler
from .client import ClientOperationHandler
class IdentificationHandler(EventHandler):
    """ Handler used for incoming connections during operation state """

    def connectionLost(self, conn, new_state):
        logging.warning('A connection was lost during identification')

    def requestIdentification(self, conn, node_type,
                                        uuid, address, name):
        """Identify an incoming node and install the matching handler.

        Anonymous (uuid-less) nodes must be storage nodes; identified
        nodes are dispatched to client or storage operation handlers.
        """
        self.checkClusterName(name)
        # reject any incoming connections if not ready
        if not self.app.ready:
            raise NotReadyError
        app = self.app
        if uuid is None:
            if node_type != NodeTypes.STORAGE:
                raise ProtocolError('reject anonymous non-storage node')
            handler = StorageOperationHandler(self.app)
            conn.setHandler(handler)
        else:
            if uuid == app.uuid:
                raise ProtocolError("uuid conflict or loopback connection")
            node = app.nm.getByUUID(uuid)
            # If this node is broken, reject it.
            if node is not None and node.isBroken():
                raise BrokenNodeDisallowedError
            # choose the handler according to the node type
            if node_type == NodeTypes.CLIENT:
                handler = ClientOperationHandler
                if node is None:
                    node = app.nm.createClient(uuid=uuid)
                elif node.isConnected():
                    # This can happen if we haven't processed yet a
                    # notification from the master, telling us the existing
                    # node is not running anymore. If we accept the new
                    # client, we won't know what to do with this late
                    # notification.
                    raise NotReadyError('uuid conflict: retry later')
                node.setRunning()
            elif node_type == NodeTypes.STORAGE:
                if node is None:
                    logging.error('reject an unknown storage node %s',
                        uuid_str(uuid))
                    raise NotReadyError
                handler = StorageOperationHandler
            else:
                raise ProtocolError('reject non-client-or-storage node')
            # apply the handler and set up the connection
            handler = handler(self.app)
            conn.setHandler(handler)
            node.setConnection(conn, app.uuid < uuid)
        # accept the identification and trigger an event
        conn.answer(Packets.AcceptIdentification(NodeTypes.STORAGE, uuid and
            app.uuid, app.pt.getPartitions(), app.pt.getReplicas(), uuid,
            app.master_node.getAddress(), ()))
        handler.connectionCompleted(conn)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/initialization.py 0000664 0000000 0000000 00000004667 12601037530 0030537 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from . import BaseMasterHandler
from neo.lib import logging, protocol
class InitializationHandler(BaseMasterHandler):
    """Handler for the master connection while the storage node loads
    its initial state (node list, then partition table)."""

    def answerNodeInformation(self, conn):
        pass

    def answerPartitionTable(self, conn, ptid, row_list):
        """Load the partition table, persist it, and drop data of
        partitions no longer assigned to this node."""
        app = self.app
        pt = app.pt
        pt.load(ptid, row_list, self.app.nm)
        if not pt.filled():
            raise protocol.ProtocolError('Partial partition table received')
        logging.debug('Got the partition table:')
        self.app.pt.log()
        # Install the partition table into the database for persistency.
        cell_list = []
        num_partitions = app.pt.getPartitions()
        unassigned_set = set(xrange(num_partitions))
        for offset in xrange(num_partitions):
            for cell in pt.getCellList(offset):
                cell_list.append((offset, cell.getUUID(), cell.getState()))
                if cell.getUUID() == app.uuid:
                    unassigned_set.remove(offset)
        # delete objects database
        if unassigned_set:
            logging.debug('drop data for partitions %r', unassigned_set)
            app.dm.dropPartitions(unassigned_set)
        app.dm.changePartitionTable(ptid, cell_list, reset=True)

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        # XXX: This is safe to ignore those notifications because all of the
        # following applies:
        # - we first ask for node information, and *then* partition
        #   table content, so it is possible to get notifyPartitionChanges
        #   packets in between (or even before asking for node information).
        # - this handler will be changed after receiving answerPartitionTable
        #   and before handling the next packet
        logging.debug('ignoring notifyPartitionChanges during initialization')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/master.py 0000664 0000000 0000000 00000005515 12601037530 0026774 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from neo.lib import logging
from neo.lib.util import dump
from neo.lib.protocol import Packets, ProtocolError
from . import BaseMasterHandler
class MasterOperationHandler(BaseMasterHandler):
    """Handle packets from the primary master during normal operation."""

    def notifyTransactionFinished(self, conn, *args, **kw):
        # Let the replicator forget about the finished transaction.
        self.app.replicator.transactionFinished(*args, **kw)

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        """Apply an incremental partition table update.

        Unlike a full table answer, only the cells that changed since the
        previous version are listed.
        """
        app = self.app
        if ptid <= app.pt.getID():
            # Stale notification: drop it.
            logging.debug('ignoring older partition changes')
            return
        # Update the in-memory table, then persist the change.
        app.pt.update(ptid, cell_list, app.nm)
        app.dm.changePartitionTable(ptid, cell_list)
        # Some partitions may now require (or stop requiring) replication.
        app.replicator.notifyPartitionChanges(cell_list)

    def askLockInformation(self, conn, ttid, tid, oid_list):
        if ttid not in self.app.tm:
            raise ProtocolError('Unknown transaction')
        self.app.tm.lock(ttid, tid, oid_list)
        if not conn.isClosed():
            conn.answer(Packets.AnswerInformationLocked(ttid))

    def notifyUnlockInformation(self, conn, ttid):
        if ttid not in self.app.tm:
            raise ProtocolError('Unknown transaction')
        # TODO: send an answer
        self.app.tm.unlock(ttid)

    def askPack(self, conn, tid):
        app = self.app
        logging.info('Pack started, up to %s...', dump(tid))
        app.dm.pack(tid, app.tm.updateObjectDataForPack)
        logging.info('Pack finished.')
        if not conn.isClosed():
            conn.answer(Packets.AnswerPack(True))

    def replicate(self, conn, tid, upstream_name, source_dict):
        # Tag every non-empty source address with the upstream cluster name
        # before handing the orders over to the replicator.
        source_map = {}
        for offset, address in source_dict.iteritems():
            source_map[offset] = address and (address, upstream_name)
        self.app.replicator.backup(tid, source_map)

    def truncate(self, conn, tid):
        self.app.replicator.cancel()
        self.app.dm.truncate(tid)
        conn.close()

    def checkPartition(self, conn, *args):
        self.app.checker(*args)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/storage.py 0000664 0000000 0000000 00000024202 12601037530 0027137 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import weakref
from functools import wraps
from neo.lib.connector import ConnectorConnectionClosedException
from neo.lib.handler import EventHandler
from neo.lib.protocol import Errors, NodeStates, Packets, ProtocolError, \
ZERO_HASH
def checkConnectionIsReplicatorConnection(func):
    """Decorator: assert that the packet arrives on the connection that
    is currently driving replication."""
    @wraps(func)
    def wrapper(self, conn, *args, **kw):
        # Any other connection would indicate a stale or spurious packet.
        assert self.app.replicator.getCurrentConnection() is conn
        return func(self, conn, *args, **kw)
    return wrapper
def checkFeedingConnection(check):
    """Decorator factory guarding server-side check/replication requests.

    The wrapped method is only executed if this node holds a suitable cell
    for the requested partition (not out-of-date for checking, readable
    for replication); otherwise an error packet is answered. The connection
    is marked as server so it is not closed when client tasks finish.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, conn, partition, *args, **kw):
            app = self.app
            cell = app.pt.getCell(partition, app.uuid)
            unusable = cell is None or (
                cell.isOutOfDate() if check else not cell.isReadable())
            if unusable:
                p = Errors.CheckingError if check else Errors.ReplicationError
                return conn.answer(p("partition %u not readable" % partition))
            conn.asServer()
            return func(self, conn, partition, *args, **kw)
        return wrapper
    return decorator
class StorageOperationHandler(EventHandler):
    """This class handles events for replications.

    It is installed on connections between storage nodes:
    - client side: on the node that fetches data (see 'Client' section),
    - server side: on the node that feeds data (see 'Server' section).
    """
    def connectionLost(self, conn, new_state):
        # Mark the peer node DOWN and abort any replication or check that
        # was using this connection.
        app = self.app
        if app.listening_conn and conn.isClient():
            # XXX: Connection and Node should merged.
            uuid = conn.getUUID()
            if uuid:
                node = app.nm.getByUUID(uuid)
            else:
                # Peer not identified yet: look it up by address instead.
                node = app.nm.getByAddress(conn.getAddress())
            node.setState(NodeStates.DOWN)
            replicator = app.replicator
            if replicator.current_node is node:
                replicator.abort()
            app.checker.connectionLost(conn)
    # Client
    def connectionFailed(self, conn):
        # Could not reach the source node: abort current replication.
        if self.app.listening_conn:
            self.app.replicator.abort()
    def _acceptIdentification(self, node, *args):
        # Identification to the source node succeeded: resume replication
        # and/or checking on this connection.
        self.app.replicator.connected(node)
        self.app.checker.connected(node)
    @checkConnectionIsReplicatorConnection
    def answerFetchTransactions(self, conn, pack_tid, next_tid, tid_list):
        # tid_list contains transactions we have but the source does not:
        # delete them locally.
        if tid_list:
            deleteTransaction = self.app.dm.deleteTransaction
            for tid in tid_list:
                deleteTransaction(tid)
        assert not pack_tid, "TODO"
        if next_tid:
            # More than one chunk ? This could be a full replication so avoid
            # restarting from the beginning by committing now.
            self.app.dm.commit()
            self.app.replicator.fetchTransactions(next_tid)
        else:
            # All transaction metadata fetched: move on to object records.
            self.app.replicator.fetchObjects()
    @checkConnectionIsReplicatorConnection
    def addTransaction(self, conn, tid, user, desc, ext, packed, ttid,
            oid_list):
        # One transaction we were missing, pushed by the source node.
        # Directly store the transaction.
        self.app.dm.storeTransaction(tid, (),
            (oid_list, user, desc, ext, packed, ttid), False)
    @checkConnectionIsReplicatorConnection
    def answerFetchObjects(self, conn, pack_tid, next_tid,
            next_oid, object_dict):
        # object_dict maps serials to oids we have but the source does not:
        # delete those records locally.
        if object_dict:
            deleteObject = self.app.dm.deleteObject
            for serial, oid_list in object_dict.iteritems():
                for oid in oid_list:
                    deleteObject(oid, serial)
        # XXX: It should be possible not to commit here if it was the last
        # chunk, because we'll either commit again when updating
        # 'backup_tid' or the partition table.
        self.app.dm.commit()
        assert not pack_tid, "TODO"
        if next_tid:
            self.app.replicator.fetchObjects(next_tid, next_oid)
        else:
            # Replication of the current partition is complete.
            self.app.replicator.finish()
    @checkConnectionIsReplicatorConnection
    def addObject(self, conn, oid, serial, compression,
            checksum, data, data_serial):
        # One object record we were missing, pushed by the source node.
        dm = self.app.dm
        if data or checksum != ZERO_HASH:
            data_id = dm.storeData(checksum, data, compression)
        else:
            # No data and zero hash: deleted record, nothing to store.
            data_id = None
        # Directly store the transaction.
        obj = oid, data_id, data_serial
        dm.storeTransaction(serial, (obj,), None, False)
    @checkConnectionIsReplicatorConnection
    def replicationError(self, conn, message):
        # The source refused or failed to feed us: abort this partition.
        self.app.replicator.abort('source message: ' + message)
    def checkingError(self, conn, message):
        try:
            self.app.checker.connectionLost(conn)
        finally:
            self.app.closeClient(conn)
    @property
    def answerCheckTIDRange(self):
        # Delegate range-check answers directly to the checker.
        return self.app.checker.checkRange
    @property
    def answerCheckSerialRange(self):
        # Delegate range-check answers directly to the checker.
        return self.app.checker.checkRange
    # Server (all methods must set connection as server so that it isn't closed
    # if client tasks are finished)
    @checkFeedingConnection(check=True)
    def askCheckTIDRange(self, conn, *args):
        app = self.app
        if app.tm.isLockedTid(args[3]): # max_tid
            # The range still contains unfinished transactions: retry once
            # they are committed.
            app.queueEvent(self.askCheckTIDRange, conn, args)
            return
        msg_id = conn.getPeerId()
        # Use a weak proxy so the pending task does not keep the connection
        # alive; a closed connection simply makes the answer a no-op.
        conn = weakref.proxy(conn)
        def check():
            r = app.dm.checkTIDRange(*args)
            try:
                conn.answer(Packets.AnswerCheckTIDRange(*r), msg_id)
            except (weakref.ReferenceError, ConnectorConnectionClosedException):
                pass
            yield
        app.newTask(check())
    @checkFeedingConnection(check=True)
    def askCheckSerialRange(self, conn, *args):
        app = self.app
        if app.tm.isLockedTid(args[3]): # max_tid
            # Transactions are always checked first, so this should not
            # happen for a well-behaved peer.
            raise ProtocolError("transactions must be checked before objects")
        msg_id = conn.getPeerId()
        conn = weakref.proxy(conn)
        def check():
            r = app.dm.checkSerialRange(*args)
            try:
                conn.answer(Packets.AnswerCheckSerialRange(*r), msg_id)
            except (weakref.ReferenceError, ConnectorConnectionClosedException):
                pass
            yield
        app.newTask(check())
    @checkFeedingConnection(check=False)
    def askFetchTransactions(self, conn, partition, length, min_tid, max_tid,
            tid_list):
        app = self.app
        if app.tm.isLockedTid(max_tid):
            # Wow, backup cluster is fast. Requested transactions are still in
            # ttrans/ttobj so wait a little.
            app.queueEvent(self.askFetchTransactions, conn,
                (partition, length, min_tid, max_tid, tid_list))
            return
        msg_id = conn.getPeerId()
        conn = weakref.proxy(conn)
        # tid_list: transactions the peer already has in this range.
        peer_tid_set = set(tid_list)
        dm = app.dm
        # Fetch one extra row to know whether another chunk follows.
        tid_list = dm.getReplicationTIDList(min_tid, max_tid, length + 1,
            partition)
        next_tid = tid_list.pop() if length < len(tid_list) else None
        def push():
            # Generator task: send one AddTransaction per missing tid,
            # yielding between packets, then answer with the set of tids
            # the peer must delete (those it has and we don't).
            try:
                pack_tid = None # TODO
                for tid in tid_list:
                    if tid in peer_tid_set:
                        peer_tid_set.remove(tid)
                    else:
                        t = dm.getTransaction(tid)
                        if t is None:
                            # Partition was dropped while we were feeding it.
                            conn.answer(Errors.ReplicationError(
                                "partition %u dropped" % partition))
                            return
                        oid_list, user, desc, ext, packed, ttid = t
                        conn.notify(Packets.AddTransaction(
                            tid, user, desc, ext, packed, ttid, oid_list))
                    yield
                conn.answer(Packets.AnswerFetchTransactions(
                    pack_tid, next_tid, peer_tid_set), msg_id)
                yield
            except (weakref.ReferenceError, ConnectorConnectionClosedException):
                pass
        app.newTask(push())
    @checkFeedingConnection(check=False)
    def askFetchObjects(self, conn, partition, length, min_tid, max_tid,
            min_oid, object_dict):
        app = self.app
        if app.tm.isLockedTid(max_tid):
            # Transactions are always fetched before objects, so a locked
            # max_tid here is a peer protocol violation.
            raise ProtocolError("transactions must be fetched before objects")
        msg_id = conn.getPeerId()
        conn = weakref.proxy(conn)
        dm = app.dm
        # Fetch one extra row to know whether another chunk follows.
        object_list = dm.getReplicationObjectList(min_tid, max_tid, length + 1,
            partition, min_oid)
        if length < len(object_list):
            next_tid, next_oid = object_list.pop()
        else:
            next_tid = next_oid = None
        def push():
            # Generator task: send one AddObject per record the peer lacks;
            # whatever remains in object_dict at the end is what the peer
            # must delete.
            try:
                pack_tid = None # TODO
                for serial, oid in object_list:
                    oid_set = object_dict.get(serial)
                    if oid_set:
                        if type(oid_set) is list:
                            # Lazily convert to set for O(1) membership.
                            object_dict[serial] = oid_set = set(oid_set)
                        if oid in oid_set:
                            # Peer already has this record: skip it.
                            oid_set.remove(oid)
                            if not oid_set:
                                del object_dict[serial]
                            continue
                    object = dm.getObject(oid, serial)
                    if not object:
                        conn.answer(Errors.ReplicationError(
                            "partition %u dropped or truncated" % partition))
                        return
                    conn.notify(Packets.AddObject(oid, serial, *object[2:]))
                    yield
                conn.answer(Packets.AnswerFetchObjects(
                    pack_tid, next_tid, next_oid, object_dict), msg_id)
                yield
            except (weakref.ReferenceError, ConnectorConnectionClosedException):
                pass
        app.newTask(push())
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/handlers/verification.py 0000664 0000000 0000000 00000006411 12601037530 0030157 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from . import BaseMasterHandler
from neo.lib import logging
from neo.lib.protocol import Packets, Errors, INVALID_TID, ZERO_TID
from neo.lib.util import dump
from neo.lib.exception import OperationFailure
class VerificationHandler(BaseMasterHandler):
    """Handle master packets during the verification phase."""

    def askLastIDs(self, conn):
        # Report last known OID/TID, partition table id and backup tid.
        app = self.app
        last_tid, _, _, last_oid = app.dm.getLastIDs()
        conn.answer(Packets.AnswerLastIDs(
            last_oid,
            last_tid,
            app.pt.getID(),
            app.dm.getBackupTID()))

    def askPartitionTable(self, conn):
        pt = self.app.pt
        conn.answer(Packets.AnswerPartitionTable(pt.getID(), pt.getRowList()))

    def notifyPartitionChanges(self, conn, ptid, cell_list):
        """Apply an incremental partition table update from the master."""
        app = self.app
        if ptid <= app.pt.getID():
            # Stale notification: drop it.
            logging.debug('ignoring older partition changes')
            return
        # Update the in-memory table and persist the change.
        app.pt.update(ptid, cell_list, app.nm)
        app.dm.changePartitionTable(ptid, cell_list)

    def startOperation(self, conn, backup):
        self.app.operational = True
        dm = self.app.dm
        if not backup:
            # Normal operation: clear any backup marker.
            dm.setBackupTID(None)
            return
        if dm.getBackupTID():
            # Backup tid already initialized: keep it.
            return
        dm.setBackupTID(dm.getLastIDs()[0] or ZERO_TID)

    def stopOperation(self, conn):
        raise OperationFailure('operation stopped')

    def askUnfinishedTransactions(self, conn):
        pending = self.app.dm.getUnfinishedTIDList()
        conn.answer(Packets.AnswerUnfinishedTransactions(INVALID_TID, pending))

    def askTransactionInformation(self, conn, tid):
        txn = self.app.dm.getTransaction(tid, all=True)
        if txn is None:
            answer = Errors.TidNotFound('%s does not exist' % dump(tid))
        else:
            answer = Packets.AnswerTransactionInformation(
                tid, txn[1], txn[2], txn[3], txn[4], txn[0])
        conn.answer(answer)

    def askObjectPresent(self, conn, oid, tid):
        if self.app.dm.objectPresent(oid, tid):
            answer = Packets.AnswerObjectPresent(oid, tid)
        else:
            answer = Errors.OidNotFound(
                '%s:%s do not exist' % (dump(oid), dump(tid)))
        conn.answer(answer)

    def deleteTransaction(self, conn, tid, oid_list):
        self.app.dm.deleteTransaction(tid, oid_list)

    def commitTransaction(self, conn, tid):
        self.app.dm.finishTransaction(tid)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/replicator.py 0000664 0000000 0000000 00000036113 12601037530 0026043 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
"""
Replication algorithm
Purpose: replicate the content of a reference node into a replicating node,
bringing it up-to-date. This happens in the following cases:
- A new storage is added to an existing cluster.
- A node was separated from cluster and rejoins it.
- In a backup cluster, the master notifies a node that new data exists upstream
(note that in this case, the cell is always marked as UP_TO_DATE).
Replication happens per partition. Reference node can change between
partitions.
2 parts, done sequentially:
- Transaction (metadata) replication
- Object (data) replication
Both parts follow the same mechanism:
- The range of data to replicate is split into chunks of FETCH_COUNT items
(transaction or object).
- For every chunk, the requesting node sends to seeding node the list of items
it already has.
- Before answering, the seeding node sends 1 packet for every missing item.
- The seeding node finally answers with the list of items to delete (usually
empty).
Replication is partial, starting from the greatest stored tid in the partition:
- For transactions, this tid is excluded from replication.
- For objects, this tid is included unless the storage already knows it has
all oids for it.
There is no check that item values on both nodes match.
TODO: Packing and replication currently fail when they happen at the same time.
"""
import random
from neo.lib import logging
from neo.lib.protocol import CellStates, NodeTypes, NodeStates, \
Packets, INVALID_TID, ZERO_TID, ZERO_OID
from neo.lib.connection import ClientConnection
from neo.lib.util import add64, dump
from .handlers.storage import StorageOperationHandler
FETCH_COUNT = 1000
class Partition(object):
    """Replication progress for one partition assigned to this node."""
    __slots__ = 'next_trans', 'next_obj', 'max_ttid'

    def __repr__(self):
        # Only show slots that are actually set, since instances start
        # with no attribute initialized.
        state = ', '.join('%s=%r' % (name, getattr(self, name))
                          for name in self.__slots__ if hasattr(self, name))
        return '<%s(%s) at 0x%x>' % (self.__class__.__name__, state, id(self))
class Replicator(object):
    """Schedule and drive replication of this node's partitions.

    See the module docstring above for a description of the replication
    algorithm. State is kept in:
    - partition_dict: offset -> Partition (replication progress),
    - replicate_dict: offset -> tid up to which replication was ordered,
    - source_dict: offset -> (addr, upstream_name) for backup sources,
    - ttid_set: unfinished transactions that may block replication.
    """

    # Node we are currently replicating from (None when idle).
    current_node = None
    # Offset of the partition currently being replicated (None when idle).
    current_partition = None

    def __init__(self, app):
        self.app = app

    def getCurrentConnection(self):
        # Return the connection to the current source node, or None if
        # there is no current node or it is not connected.
        node = self.current_node
        if node is not None and node.isConnected(True):
            return node.getConnection()

    # XXX: We can't replicate unfinished transactions but do we need such
    #      complex code ? Backup mechanism does not rely on this: instead
    #      the upstream storage delays the answer. Maybe we can do the same
    #      for internal replication.

    def setUnfinishedTIDList(self, max_tid, ttid_list, offset_list):
        """This is a callback from MasterOperationHandler."""
        if ttid_list:
            self.ttid_set.update(ttid_list)
            max_ttid = max(ttid_list)
        else:
            max_ttid = None
        for offset in offset_list:
            self.partition_dict[offset].max_ttid = max_ttid
            self.replicate_dict[offset] = max_tid
        self._nextPartition()

    def transactionFinished(self, ttid, max_tid):
        """ Callback from MasterOperationHandler """
        self.ttid_set.remove(ttid)
        min_ttid = min(self.ttid_set) if self.ttid_set else INVALID_TID
        for offset, p in self.partition_dict.iteritems():
            if p.max_ttid and p.max_ttid < min_ttid:
                # No unfinished transaction can block this partition
                # anymore: allow replication up to max_tid.
                p.max_ttid = None
                self.replicate_dict[offset] = max_tid
        self._nextPartition()

    def getBackupTID(self):
        # Compute the tid up to which all non-outdated cells of this node
        # are consistent: one less than the lowest replication point.
        outdated_set = set(self.app.pt.getOutdatedOffsetListFor(self.app.uuid))
        tid = INVALID_TID
        for offset, p in self.partition_dict.iteritems():
            if offset not in outdated_set:
                tid = min(tid, p.next_trans, p.next_obj)
        if ZERO_TID != tid != INVALID_TID:
            return add64(tid, -1)
        return ZERO_TID

    def updateBackupTID(self):
        # Persist the backup tid if it changed (only when one is set).
        dm = self.app.dm
        tid = dm.getBackupTID()
        if tid:
            new_tid = self.getBackupTID()
            if tid != new_tid:
                dm.setBackupTID(new_tid)

    def populate(self):
        """Initialize replication state from the database and the partition
        table, and ask the master about unfinished transactions for every
        out-of-date cell."""
        app = self.app
        pt = app.pt
        uuid = app.uuid
        self.partition_dict = p = {}
        self.replicate_dict = {}
        self.source_dict = {}
        self.ttid_set = set()
        last_tid, last_trans_dict, last_obj_dict, _ = app.dm.getLastIDs()
        next_tid = app.dm.getBackupTID() or last_tid
        next_tid = add64(next_tid, 1) if next_tid else ZERO_TID
        outdated_list = []
        for offset in xrange(pt.getPartitions()):
            for cell in pt.getCellList(offset):
                if cell.getUUID() == uuid and not cell.isCorrupted():
                    self.partition_dict[offset] = p = Partition()
                    if cell.isOutOfDate():
                        outdated_list.append(offset)
                        try:
                            p.next_trans = add64(last_trans_dict[offset], 1)
                        except KeyError:
                            p.next_trans = ZERO_TID
                        p.next_obj = last_obj_dict.get(offset, ZERO_TID)
                        p.max_ttid = INVALID_TID
                    else:
                        p.next_trans = p.next_obj = next_tid
                        p.max_ttid = None
        if outdated_list:
            self.app.master_conn.ask(Packets.AskUnfinishedTransactions(),
                offset_list=outdated_list)

    def notifyPartitionChanges(self, cell_list):
        """This is a callback from MasterOperationHandler."""
        abort = False
        added_list = []
        app = self.app
        last_tid, last_trans_dict, last_obj_dict, _ = app.dm.getLastIDs()
        for offset, uuid, state in cell_list:
            if uuid == app.uuid:
                if state in (CellStates.DISCARDED, CellStates.CORRUPTED):
                    # We lost this cell: drop all replication state for it.
                    try:
                        del self.partition_dict[offset]
                    except KeyError:
                        continue
                    self.replicate_dict.pop(offset, None)
                    self.source_dict.pop(offset, None)
                    abort = abort or self.current_partition == offset
                elif state == CellStates.OUT_OF_DATE:
                    # New cell assigned to us: start tracking it from the
                    # last data we already have for this partition.
                    assert offset not in self.partition_dict
                    self.partition_dict[offset] = p = Partition()
                    try:
                        p.next_trans = add64(last_trans_dict[offset], 1)
                    except KeyError:
                        p.next_trans = ZERO_TID
                    p.next_obj = last_obj_dict.get(offset, ZERO_TID)
                    p.max_ttid = INVALID_TID
                    added_list.append(offset)
        if added_list:
            self.app.master_conn.ask(Packets.AskUnfinishedTransactions(),
                offset_list=added_list)
        if abort:
            self.abort()

    def backup(self, tid, source_dict):
        # Process replication orders from the upstream (backup) cluster.
        next_tid = None
        for offset, source in source_dict.iteritems():
            if source:
                self.source_dict[offset] = source
                self.replicate_dict[offset] = tid
            elif offset != self.current_partition and \
                 offset not in self.replicate_dict:
                # The master did its best to avoid useless replication orders
                # but there may still be a few, and we may receive redundant
                # update notification of backup_tid.
                # So, we do nothing here if we are already replicating.
                p = self.partition_dict[offset]
                if not next_tid:
                    next_tid = add64(tid, 1)
                p.next_trans = p.next_obj = next_tid
        if next_tid:
            self.updateBackupTID()
        self._nextPartition()

    def _nextPartition(self):
        # XXX: One connection to another storage may remain open forever.
        #      All other previous connections are automatically closed
        #      after some time of inactivity.
        #      This should be improved in several ways:
        #      - Keeping connections open between 2 clusters (backup case) is
        #        quite a good thing because establishing a connection costs
        #        time/bandwidth and replication is actually never finished.
        #      - When all storages of a non-backup cluster are up-to-date,
        #        there's no reason to keep any connection open.
        if self.current_partition is not None or not self.replicate_dict:
            return
        app = self.app
        # Choose a partition with no unfinished transaction if possible.
        # XXX: When leaving backup mode, we should only consider UP_TO_DATE
        #      cells.
        for offset in self.replicate_dict:
            if not self.partition_dict[offset].max_ttid:
                break
        try:
            addr, name = self.source_dict[offset]
        except KeyError:
            # Internal replication: pick a random running readable cell.
            assert app.pt.getCell(offset, app.uuid).isOutOfDate()
            node = random.choice([cell.getNode()
                for cell in app.pt.getCellList(offset, readable=True)
                if cell.getNodeState() == NodeStates.RUNNING])
            name = None
        else:
            # Backup replication: use the configured upstream source.
            node = app.nm.getByAddress(addr)
            if node is None:
                assert name, addr
                node = app.nm.createStorage(address=addr)
        self.current_partition = offset
        previous_node = self.current_node
        self.current_node = node
        if node.isConnected(connecting=True):
            if node.isIdentified():
                node.getConnection().asClient()
                self.fetchTransactions()
        else:
            assert name or node.getUUID() != app.uuid, "loopback connection"
            conn = ClientConnection(app.em, StorageOperationHandler(app), node)
            conn.ask(Packets.RequestIdentification(NodeTypes.STORAGE,
                None if name else app.uuid, app.server, name or app.name))
        if previous_node is not None and previous_node.isConnected():
            app.closeClient(previous_node.getConnection())

    def connected(self, node):
        # Identification to the source node completed: start fetching,
        # unless the partition was dropped in the meantime.
        if self.current_node is node and self.current_partition is not None:
            self.fetchTransactions()

    def fetchTransactions(self, min_tid=None):
        offset = self.current_partition
        p = self.partition_dict[offset]
        if min_tid:
            p.next_trans = min_tid
        else:
            try:
                addr, name = self.source_dict[offset]
            except KeyError:
                pass
            else:
                if addr != self.current_node.getAddress():
                    # Current connection does not match the requested
                    # source anymore: restart with the right node.
                    return self.abort()
            min_tid = p.next_trans
            self.replicate_tid = self.replicate_dict.pop(offset)
            # BUGFIX: the format string used to be
            # "starting replication of from %r" with 4 arguments, which
            # broke log formatting; give each argument a placeholder.
            logging.debug("starting replication of partition %u (%s to %s)"
                " from %r", offset, dump(min_tid),
                dump(self.replicate_tid), self.current_node)
        max_tid = self.replicate_tid
        tid_list = self.app.dm.getReplicationTIDList(min_tid, max_tid,
            FETCH_COUNT, offset)
        self.current_node.getConnection().ask(Packets.AskFetchTransactions(
            offset, FETCH_COUNT, min_tid, max_tid, tid_list))

    def fetchObjects(self, min_tid=None, min_oid=ZERO_OID):
        offset = self.current_partition
        p = self.partition_dict[offset]
        max_tid = self.replicate_tid
        if min_tid:
            p.next_obj = min_tid
        else:
            # First chunk: transactions are already fully replicated
            # up to max_tid.
            min_tid = p.next_obj
            p.next_trans = add64(max_tid, 1)
        object_dict = {}
        for serial, oid in self.app.dm.getReplicationObjectList(min_tid,
                max_tid, FETCH_COUNT, offset, min_oid):
            try:
                object_dict[serial].append(oid)
            except KeyError:
                object_dict[serial] = [oid]
        self.current_node.getConnection().ask(Packets.AskFetchObjects(
            offset, FETCH_COUNT, min_tid, max_tid, min_oid, object_dict))

    def finish(self):
        # Current partition fully replicated up to replicate_tid.
        offset = self.current_partition
        tid = self.replicate_tid
        del self.current_partition, self.replicate_tid
        p = self.partition_dict[offset]
        p.next_obj = add64(tid, 1)
        self.updateBackupTID()
        if not p.max_ttid:
            p = Packets.NotifyReplicationDone(offset, tid)
            self.app.master_conn.notify(p)
        logging.debug("partition %u replicated up to %s from %r",
            offset, dump(tid), self.current_node)
        self.getCurrentConnection().setReconnectionNoDelay()
        self._nextPartition()

    def abort(self, message=''):
        offset = self.current_partition
        if offset is None:
            return
        del self.current_partition
        logging.warning('replication aborted for partition %u%s',
            offset, message and ' (%s)' % message)
        if offset in self.partition_dict:
            # XXX: Try another partition if possible, to increase probability
            #      to connect to another node. It would be better to
            #      explicitely search for another node instead.
            tid = self.replicate_dict.pop(offset, None) or self.replicate_tid
            if self.replicate_dict:
                self._nextPartition()
                self.replicate_dict[offset] = tid
            else:
                self.replicate_dict[offset] = tid
                self._nextPartition()
        else: # partition removed
            self._nextPartition()

    def cancel(self):
        offset = self.current_partition
        if offset is not None:
            logging.info('cancel replication of partition %u', offset)
            del self.current_partition
            try:
                # Keep the order so this partition is replicated again
                # later; replicate_tid may not exist if fetching never
                # started.
                self.replicate_dict.setdefault(offset, self.replicate_tid)
                del self.replicate_tid
            except AttributeError:
                pass
            self.getCurrentConnection().close()

    def stop(self):
        # Close any open connection to an upstream storage,
        # possibly aborting current replication.
        node = self.current_node
        # Chained comparison: node exists AND its UUID is None,
        # i.e. it is an unidentified upstream storage node.
        if node is not None is node.getUUID():
            self.cancel()
        # Cancel all replication orders from upstream cluster.
        for offset in self.replicate_dict.keys():
            addr, name = self.source_dict.get(offset, (None, None))
            if name:
                tid = self.replicate_dict.pop(offset)
                logging.info('cancel replication of partition %u from %r'
                    ' up to %s', offset, addr, dump(tid))
        # Make UP_TO_DATE cells really UP_TO_DATE
        self._nextPartition()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/storage/transactions.py 0000664 0000000 0000000 00000034337 12601037530 0026415 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2010-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from time import time
from neo.lib import logging
from neo.lib.util import dump
from neo.lib.protocol import uuid_str, ZERO_TID
class ConflictError(Exception):
    """A resolvable write conflict was detected.

    The argument is the tid of the locking transaction, or the latest
    revision of the conflicting object.
    """

    def __init__(self, tid):
        super(ConflictError, self).__init__()
        self._tid = tid

    def getTID(self):
        return self._tid
class DelayedError(Exception):
    """Raised when an object is locked by a previous transaction: the
    request must be retried once the lock is released."""
class Transaction(object):
    """In-memory state of one pending transaction on this storage."""

    # Definitive TID, assigned by setTID() at locking time.
    _tid = None

    def __init__(self, uuid, ttid):
        self._birth = time()
        self._uuid = uuid
        self._ttid = ttid
        # oid -> (oid, data_id, value_serial) for stored objects
        self._object_dict = {}
        # oids that are only checked (checkCurrentSerial), never stored
        self._checked_set = set()
        self._transaction = None
        self._locked = False

    def __repr__(self):
        info = (self.__class__.__name__,
                dump(self._ttid),
                dump(self._tid),
                uuid_str(self._uuid),
                self.isLocked(),
                time() - self._birth,
                id(self))
        return "<%s(ttid=%r, tid=%r, uuid=%r, locked=%r, age=%.2fs) at 0x%x>" \
            % info

    def addCheckedObject(self, oid):
        # An oid cannot be both stored and checked in the same transaction.
        assert oid not in self._object_dict, dump(oid)
        self._checked_set.add(oid)

    def getTTID(self):
        return self._ttid

    def setTID(self, tid):
        # The definitive TID may only be set once.
        assert self._tid is None, dump(self._tid)
        assert tid is not None
        self._tid = tid

    def getTID(self):
        return self._tid

    def getUUID(self):
        return self._uuid

    def lock(self):
        assert not self._locked
        self._locked = True

    def isLocked(self):
        return self._locked

    def prepare(self, oid_list, user, desc, ext, packed):
        """Record the transaction metadata."""
        # assert self._transaction is not None
        self._transaction = (oid_list, user, desc, ext, packed, self._ttid)

    def addObject(self, oid, data_id, value_serial):
        """Register a stored object (latest store wins)."""
        assert oid not in self._checked_set, dump(oid)
        self._object_dict[oid] = (oid, data_id, value_serial)

    def delObject(self, oid):
        # Drop a stored object and return its data_id, or forget a
        # checked oid (returning None).
        if oid in self._object_dict:
            return self._object_dict.pop(oid)[1]
        self._checked_set.remove(oid)

    def getObject(self, oid):
        return self._object_dict[oid]

    def getObjectList(self):
        return self._object_dict.values()

    def getOIDList(self):
        return self._object_dict.keys()

    def getLockedOIDList(self):
        return self._object_dict.keys() + list(self._checked_set)

    def getTransactionInformations(self):
        return self._transaction
class TransactionManager(object):
"""
Manage pending transaction and locks
"""
    def __init__(self, app):
        self._app = app
        # ttid -> Transaction
        self._transaction_dict = {}
        # oid -> ttid holding the write (store) lock
        self._store_lock_dict = {}
        # oid -> ttid holding the read (load) lock
        self._load_lock_dict = {}
        # client uuid -> set of its Transaction objects
        self._uuid_dict = {}
def __contains__(self, ttid):
"""
Returns True if the TID is known by the manager
"""
return ttid in self._transaction_dict
def register(self, uuid, ttid):
"""
Register a transaction, it may be already registered
"""
logging.debug('Register TXN %s for %s', dump(ttid), uuid_str(uuid))
transaction = self._transaction_dict.get(ttid, None)
if transaction is None:
transaction = Transaction(uuid, ttid)
self._uuid_dict.setdefault(uuid, set()).add(transaction)
self._transaction_dict[ttid] = transaction
return transaction
def getObjectFromTransaction(self, ttid, oid):
"""
Return object data for given running transaction.
Return None if not found.
"""
try:
return self._transaction_dict[ttid].getObject(oid)
except KeyError:
return None
def reset(self):
"""
Reset the transaction manager
"""
self._transaction_dict.clear()
self._store_lock_dict.clear()
self._load_lock_dict.clear()
self._uuid_dict.clear()
def lock(self, ttid, tid, oid_list):
"""
Lock a transaction
"""
logging.debug('Lock TXN %s (ttid=%s)', dump(tid), dump(ttid))
transaction = self._transaction_dict[ttid]
# remember that the transaction has been locked
transaction.lock()
for oid in transaction.getOIDList():
self._load_lock_dict[oid] = ttid
# check every object that should be locked
uuid = transaction.getUUID()
is_assigned = self._app.pt.isAssigned
for oid in oid_list:
if is_assigned(oid, uuid) and \
self._load_lock_dict.get(oid) != ttid:
raise ValueError, 'Some locks are not held'
object_list = transaction.getObjectList()
# txn_info is None is the transaction information is not stored on
# this storage.
txn_info = transaction.getTransactionInformations()
# store data from memory to temporary table
self._app.dm.storeTransaction(tid, object_list, txn_info)
# ...and remember its definitive TID
transaction.setTID(tid)
def getTIDFromTTID(self, ttid):
return self._transaction_dict[ttid].getTID()
def unlock(self, ttid):
"""
Unlock transaction
"""
logging.debug('Unlock TXN %s', dump(ttid))
self._app.dm.finishTransaction(self.getTIDFromTTID(ttid))
self.abort(ttid, even_if_locked=True)
def storeTransaction(self, ttid, oid_list, user, desc, ext, packed):
"""
Store transaction information received from client node
"""
assert ttid in self, "Transaction not registered"
transaction = self._transaction_dict[ttid]
transaction.prepare(oid_list, user, desc, ext, packed)
def getLockingTID(self, oid):
return self._store_lock_dict.get(oid)
    def lockObject(self, ttid, serial, oid, unlock=False):
        """
        Take a write lock on given object, checking that "serial" is
        current.
        Raises:
            DelayedError
            ConflictError

        Outcome depends on who currently holds the write lock on oid:
        - nobody, or ttid itself: acquire/keep the lock, after checking
          that "serial" matches the last known revision (otherwise raise a
          resolvable ConflictError with that revision);
        - an older transaction (smaller ttid): raise DelayedError so the
          request is retried once that transaction releases its locks;
        - a younger transaction (bigger ttid): raise ConflictError(ZERO_TID)
          to make the client break the possible deadlock.
        """
        # check if the object if locked
        locking_tid = self._store_lock_dict.get(oid)
        if locking_tid == ttid and unlock:
            logging.info('Deadlock resolution on %r:%r', dump(oid), dump(ttid))
            # A duplicate store means client is resolving a deadlock, so
            # drop the lock it held on this object, and drop object data for
            # consistency.
            del self._store_lock_dict[oid]
            data_id = self._transaction_dict[ttid].delObject(oid)
            if data_id:
                self._app.dm.pruneData((data_id,))
            # Give a chance to pending events to take that lock now.
            self._app.executeQueuedEvents()
            # Attemp to acquire lock again.
            locking_tid = self._store_lock_dict.get(oid)
        if locking_tid is None:
            previous_serial = None
        elif locking_tid == ttid:
            # If previous store was an undo, next store must be based on
            # undo target.
            previous_serial = self._transaction_dict[ttid].getObject(oid)[2]
            if previous_serial is None:
                # XXX: use some special serial when previous store was not
                # an undo ? Maybe it should just not happen.
                logging.info('Transaction %s storing %s more than once',
                    dump(ttid), dump(oid))
        elif locking_tid < ttid:
            # We have a bigger TTID than locking transaction, so we are younger:
            # enter waiting queue so we are handled when lock gets released.
            # We also want to delay (instead of conflict) if the client is
            # so faster that it is committing another transaction before we
            # processed UnlockInformation from the master.
            logging.info('Store delayed for %r:%r by %r', dump(oid),
                dump(ttid), dump(locking_tid))
            raise DelayedError
        else:
            # We have a smaller TTID than locking transaction, so we are older:
            # this is a possible deadlock case, as we might already hold locks
            # the younger transaction is waiting upon. Make client release
            # locks & reacquire them by notifying it of the possible deadlock.
            logging.info('Possible deadlock on %r:%r with %r',
                dump(oid), dump(ttid), dump(locking_tid))
            raise ConflictError(ZERO_TID)
        if previous_serial is None:
            # No revision known in memory: fall back to the database.
            previous_serial = self._app.dm.getLastObjectTID(oid)
        if previous_serial is not None and previous_serial != serial:
            logging.info('Resolvable conflict on %r:%r',
                dump(oid), dump(ttid))
            raise ConflictError(previous_serial)
        logging.debug('Transaction %s storing %s', dump(ttid), dump(oid))
        self._store_lock_dict[oid] = ttid
def checkCurrentSerial(self, ttid, serial, oid):
self.lockObject(ttid, serial, oid, unlock=True)
assert ttid in self, "Transaction not registered"
transaction = self._transaction_dict[ttid]
transaction.addCheckedObject(oid)
def storeObject(self, ttid, serial, oid, compression, checksum, data,
value_serial, unlock=False):
"""
Store an object received from client node
"""
self.lockObject(ttid, serial, oid, unlock=unlock)
# store object
assert ttid in self, "Transaction not registered"
if data is None:
data_id = None
else:
data_id = self._app.dm.holdData(checksum, data, compression)
self._transaction_dict[ttid].addObject(oid, data_id, value_serial)
    def abort(self, ttid, even_if_locked=False):
        """
        Abort a transaction
        Releases locks held on all transaction objects, deletes Transaction
        instance, and executed queued events.
        Note: does not alter persistent content.

        A locked transaction (second commit phase already started) is only
        dropped when even_if_locked is true.
        """
        if ttid not in self._transaction_dict:
            # the tid may be unknown as the transaction is aborted on every node
            # of the partition, even if no data was received (eg. conflict on
            # another node)
            return
        logging.debug('Abort TXN %s', dump(ttid))
        transaction = self._transaction_dict[ttid]
        has_load_lock = transaction.isLocked()
        # if the transaction is locked, ensure we can drop it
        if has_load_lock:
            if not even_if_locked:
                return
        else:
            # Not locked yet: release the data references taken by
            # holdData() for every stored object.
            self._app.dm.releaseData([data_id
                for oid, data_id, value_serial in transaction.getObjectList()
                if data_id], True)
        # unlock any object
        for oid in transaction.getLockedOIDList():
            if has_load_lock:
                # Load locks are only taken at lock() time, hence the pop
                # with a None default.
                lock_ttid = self._load_lock_dict.pop(oid, None)
                assert lock_ttid in (ttid, None), 'Transaction %s tried to ' \
                    'release the lock on oid %s, but it was held by %s' % (
                    dump(ttid), dump(oid), dump(lock_ttid))
            write_locking_tid = self._store_lock_dict.pop(oid)
            assert write_locking_tid == ttid, 'Inconsistent locking state: ' \
                'aborting %s:%s but %s has the lock.' % (dump(ttid), dump(oid),
                dump(write_locking_tid))
        # remove the transaction
        uuid = transaction.getUUID()
        self._uuid_dict[uuid].discard(transaction)
        # clean node index if there is no more current transactions
        if not self._uuid_dict[uuid]:
            del self._uuid_dict[uuid]
        del self._transaction_dict[ttid]
        # some locks were released, some pending locks may now succeed
        self._app.executeQueuedEvents()
def abortFor(self, uuid):
"""
Abort any non-locked transaction of a node
"""
logging.debug('Abort for %s', uuid_str(uuid))
# abort any non-locked transaction of this node
for ttid in [x.getTTID() for x in self._uuid_dict.get(uuid, [])]:
self.abort(ttid)
# cleanup _uuid_dict if no transaction remains for this node
transaction_set = self._uuid_dict.get(uuid)
if transaction_set is not None and not transaction_set:
del self._uuid_dict[uuid]
def isLockedTid(self, tid):
for t in self._transaction_dict.itervalues():
if t.isLocked() and t.getTID() <= tid:
return True
return False
def loadLocked(self, oid):
return oid in self._load_lock_dict
def log(self):
logging.info("Transactions:")
for txn in self._transaction_dict.values():
logging.info(' %r', txn)
logging.info(' Read locks:')
for oid, ttid in self._load_lock_dict.items():
logging.info(' %r by %r', dump(oid), dump(ttid))
logging.info(' Write locks:')
for oid, ttid in self._store_lock_dict.items():
logging.info(' %r by %r', dump(oid), dump(ttid))
def updateObjectDataForPack(self, oid, orig_serial, new_serial, data_id):
lock_tid = self.getLockingTID(oid)
if lock_tid is not None:
transaction = self._transaction_dict[lock_tid]
if transaction.getObject(oid)[2] == orig_serial:
if new_serial:
data_id = None
else:
self._app.dm.holdData(data_id)
transaction.addObject(oid, data_id, new_serial)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/ 0000775 0000000 0000000 00000000000 12601037530 0023017 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/__init__.py 0000664 0000000 0000000 00000050347 12601037530 0025141 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import __builtin__
import errno
import functools
import gc
import os
import random
import socket
import sys
import tempfile
import unittest
import weakref
import MySQLdb
import transaction
from functools import wraps
from mock import Mock
from neo.lib import debug, logging, protocol
from neo.lib.protocol import NodeTypes, Packets, UUID_NAMESPACES
from time import time
from struct import pack, unpack
from unittest.case import _ExpectedFailure, _UnexpectedSuccess
try:
from ZODB.utils import newTid
except ImportError:
pass
def expectedFailure(exception=AssertionError):
    """Variant of unittest.expectedFailure that only treats *exception*
    (default: AssertionError) as the expected failure.

    Usable both with and without arguments:
      @expectedFailure            -> any Exception is expected
      @expectedFailure(SomeError) -> only SomeError is expected
    """
    def decorator(func):
        def wrapper(*args, **kw):
            try:
                func(*args, **kw)
            except exception, e:
                # XXX: passing sys.exc_info() causes deadlocks
                raise _ExpectedFailure((type(e), None, None))
            raise _UnexpectedSuccess
        return wraps(func)(wrapper)
    if callable(exception) and not isinstance(exception, type):
        # Used as @expectedFailure without parentheses: 'exception' is
        # actually the decorated function, not an exception class.
        func = exception
        exception = Exception
        return decorator(func)
    return decorator
# Test database names/credentials, overridable from the environment.
DB_PREFIX = os.getenv('NEO_DB_PREFIX', 'test_neo')
DB_ADMIN = os.getenv('NEO_DB_ADMIN', 'root')
DB_PASSWD = os.getenv('NEO_DB_PASSWD', '')
DB_USER = os.getenv('NEO_DB_USER', 'test')
# Loopback address used to bind test nodes, per address family.
IP_VERSION_FORMAT_DICT = {
    socket.AF_INET: '127.0.0.1',
    socket.AF_INET6: '::1',
}
ADDRESS_TYPE = socket.AF_INET
# Drop output of the default log handler and keep records in a backlog
# instead (see neo.lib.logging for the exact backlog semantics).
logging.default_root_handler.handle = lambda record: None
logging.backlog(None, 1<<20)
debug.register()
# prevent "signal only works in main thread" errors in subprocesses
debug.register = lambda on_log=None: None
def mockDefaultValue(name, function):
    """Install *function* as the default implementation of Mock.<name>,
    used only when a test did not configure an explicit return value."""
    def method(self, *args, **kw):
        if name in self.mockReturnValues:
            # An explicit return value was configured: honour it.
            return self.__getattr__(name)(*args, **kw)
        return function(self, *args, **kw)
    method.__name__ = name
    setattr(Mock, name, method)
# Sensible defaults so mocks behave in boolean contexts and in logs.
mockDefaultValue('__nonzero__', lambda self: self.__len__() != 0)
mockDefaultValue('__repr__', lambda self:
    '<%s object at 0x%x>' % (self.__class__.__name__, id(self)))
mockDefaultValue('__str__', repr)
def buildUrlFromString(address):
    """Wrap IPv6 literals in brackets for use inside host:port strings;
    any other address is returned unchanged."""
    try:
        socket.inet_pton(socket.AF_INET6, address)
    except Exception:
        # Not a parseable IPv6 address: leave it as-is.
        return address
    return '[%s]' % address
def getTempDirectory():
    """get the current temp directory or a new one

    The chosen directory is cached in os.environ['TEMP'] so that every
    test of the process (and subprocesses inheriting the environment)
    shares the same directory.
    """
    try:
        temp_dir = os.environ['TEMP']
    except KeyError:
        neo_dir = os.path.join(tempfile.gettempdir(), 'neo_tests')
        while True:
            # Use the current timestamp as a unique directory name; retry
            # on collision with a concurrent run.
            temp_dir = os.path.join(neo_dir, repr(time()))
            try:
                os.makedirs(temp_dir)
                break
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise
        os.environ['TEMP'] = temp_dir
        print 'Using temp directory %r.' % temp_dir
    return temp_dir
def setupMySQLdb(db_list, user=DB_USER, password='', clear_databases=True):
    """(Re)create the given MySQL test databases and grant *user* full
    access to them, connecting as the DB_ADMIN account.

    When clear_databases is false, existing databases are kept as-is.
    NOTE: identifiers are interpolated into the SQL statements; this is
    acceptable only because db_list/user come from the test environment,
    never from untrusted input.
    """
    from MySQLdb.constants.ER import BAD_DB_ERROR
    conn = MySQLdb.Connect(user=DB_ADMIN, passwd=DB_PASSWD)
    cursor = conn.cursor()
    for database in db_list:
        try:
            conn.select_db(database)
            if not clear_databases:
                # Database exists and must be preserved.
                continue
            cursor.execute('DROP DATABASE `%s`' % database)
        except MySQLdb.OperationalError, (code, _):
            # Only "unknown database" is expected here.
            if code != BAD_DB_ERROR:
                raise
            cursor.execute('GRANT ALL ON `%s`.* TO "%s"@"localhost" IDENTIFIED'
                ' BY "%s"' % (database, user, password))
        cursor.execute('CREATE DATABASE `%s`' % database)
    cursor.close()
    conn.commit()
    conn.close()
class NeoTestBase(unittest.TestCase):
    """Common base for NEO test cases: per-test-class log file setup and a
    strict teardown that forbids uncollectable garbage."""
    def setUp(self):
        # NOTE(review): setupLog() has no return statement, so this also
        # resets logging.name to None right after setupLog() assigned it —
        # presumably subclasses override setupLog() to return a name; verify.
        logging.name = self.setupLog()
        unittest.TestCase.setUp(self)
    def setupLog(self):
        # Log into <tempdir>/<TestCaseClass>.log, shared by the whole class.
        test_case, logging.name = self.id().rsplit('.', 1)
        logging.setup(os.path.join(getTempDirectory(), test_case + '.log'))
    def tearDown(self):
        # Subclasses must override _tearDown (not tearDown), so the caller
        # frame's 'success' local can be propagated to it.
        assert self.tearDown.im_func is NeoTestBase.tearDown.im_func
        self._tearDown(sys._getframe(1).f_locals['success'])
        assert not gc.garbage, gc.garbage
    def _tearDown(self, success):
        # Kill all unfinished transactions for next test.
        # Note we don't even abort them because it may require a valid
        # connection to a master node (see Storage.sync()).
        transaction.manager.__init__()
    class failureException(AssertionError):
        # Log assertion failures so they also appear in the NEO log file.
        def __init__(self, msg=None):
            logging.error(msg)
            AssertionError.__init__(self, msg)
    # Ban deprecated unittest aliases: force canonical assert* spellings.
    failIfEqual = failUnlessEqual = assertEquals = assertNotEquals = None
    def assertNotEqual(self, first, second, msg=None):
        assert not (isinstance(first, Mock) or isinstance(second, Mock)), \
            "Mock objects can't be compared with '==' or '!='"
        return super(NeoTestBase, self).assertNotEqual(first, second, msg=msg)
    def assertEqual(self, first, second, msg=None):
        assert not (isinstance(first, Mock) or isinstance(second, Mock)), \
            "Mock objects can't be compared with '==' or '!='"
        return super(NeoTestBase, self).assertEqual(first, second, msg=msg)
class NeoUnitTestBase(NeoTestBase):
    """ Base class for neo tests, implements common checks """
    # Loopback address matching the configured ADDRESS_TYPE family.
    local_ip = IP_VERSION_FORMAT_DICT[ADDRESS_TYPE]
    def setUp(self):
        # Per-node-type counters backing getNewUUID().
        self.uuid_dict = {}
        NeoTestBase.setUp(self)
    def prepareDatabase(self, number, prefix=DB_PREFIX):
        """ create empty databases """
        adapter = os.getenv('NEO_TESTS_ADAPTER', 'MySQL')
        if adapter == 'MySQL':
            setupMySQLdb([prefix + str(i) for i in xrange(number)])
        elif adapter == 'SQLite':
            temp_dir = getTempDirectory()
            for i in xrange(number):
                try:
                    os.remove(os.path.join(temp_dir, 'test_neo%s.sqlite' % i))
                except OSError, e:
                    # A database file that does not exist yet is fine.
                    if e.errno != errno.ENOENT:
                        raise
        else:
            assert False, adapter
    def getMasterConfiguration(self, cluster='main', master_number=2,
            replicas=2, partitions=1009, uuid=None):
        """Return a mock master-node configuration object."""
        assert master_number >= 1 and master_number <= 10
        masters = ([(self.local_ip, 10010 + i)
                    for i in xrange(master_number)])
        return Mock({
            'getCluster': cluster,
            'getBind': masters[0],
            'getMasters': masters,
            'getReplicas': replicas,
            'getPartitions': partitions,
            'getUUID': uuid,
        })
    def getStorageConfiguration(self, cluster='main', master_number=2,
            index=0, prefix=DB_PREFIX, uuid=None):
        """Return a mock storage-node configuration object."""
        assert master_number >= 1 and master_number <= 10
        assert index >= 0 and index <= 9
        masters = [(buildUrlFromString(self.local_ip),
                    10010 + i) for i in xrange(master_number)]
        adapter = os.getenv('NEO_TESTS_ADAPTER', 'MySQL')
        if adapter == 'MySQL':
            db = '%s@%s%s' % (DB_USER, prefix, index)
        elif adapter == 'SQLite':
            db = os.path.join(getTempDirectory(), 'test_neo%s.sqlite' % index)
        else:
            assert False, adapter
        return Mock({
            'getCluster': cluster,
            # NOTE(review): this nests an (address, port) pair inside
            # another tuple, unlike getMasterConfiguration — confirm the
            # consumers expect this shape.
            'getBind': (masters[0], 10020 + index),
            'getMasters': masters,
            'getDatabase': db,
            'getUUID': uuid,
            'getReset': False,
            'getAdapter': adapter,
        })
    def getNewUUID(self, node_type):
        """
        Return a fresh UUID integer in the namespace of 'node_type'
        (a random node type is picked when None is given).
        """
        if node_type is None:
            node_type = random.choice(NodeTypes)
        self.uuid_dict[node_type] = uuid = 1 + self.uuid_dict.get(node_type, 0)
        return uuid + (UUID_NAMESPACES[node_type] << 24)
    def getClientUUID(self):
        return self.getNewUUID(NodeTypes.CLIENT)
    def getMasterUUID(self):
        return self.getNewUUID(NodeTypes.MASTER)
    def getStorageUUID(self):
        return self.getNewUUID(NodeTypes.STORAGE)
    def getAdminUUID(self):
        return self.getNewUUID(NodeTypes.ADMIN)
    def getNextTID(self, ltid=None):
        # Relies on ZODB.utils.newTid, imported at module level when
        # ZODB is available.
        return newTid(ltid)
    def getPTID(self, i=None):
        """ Return an integer PTID """
        if i is None:
            return random.randint(1, 2**64)
        return i
    def getOID(self, i=None):
        """ Return a 8-bytes OID """
        if i is None:
            return os.urandom(8)
        return pack('!Q', i)
    def getFakeConnector(self, descriptor=None):
        return Mock({
            '__repr__': 'FakeConnector',
            'getDescriptor': descriptor,
            'getAddress': ('', 0),
        })
    def getFakeConnection(self, uuid=None, address=('127.0.0.1', 10000),
            is_server=False, connector=None, peer_id=None):
        """Return a mock connection suitable for handler-level tests."""
        if connector is None:
            connector = self.getFakeConnector()
        conn = Mock({
            'getUUID': uuid,
            'getAddress': address,
            'isServer': is_server,
            '__repr__': 'FakeConnection',
            '__nonzero__': 0,
            'getConnector': connector,
            'getPeerId': peer_id,
        })
        # Make the mock hashable by identity so it can be used in sets/dicts.
        conn.mockAddReturnValues(__hash__ = id(conn))
        conn.connecting = False
        return conn
    def checkProtocolErrorRaised(self, method, *args, **kwargs):
        """ Check if the ProtocolError exception was raised """
        self.assertRaises(protocol.ProtocolError, method, *args, **kwargs)
    def checkUnexpectedPacketRaised(self, method, *args, **kwargs):
        """ Check if the UnexpectedPacketError exception was raised """
        self.assertRaises(protocol.UnexpectedPacketError, method, *args, **kwargs)
    def checkIdenficationRequired(self, method, *args, **kwargs):
        """ Check is the identification_required decorator is applied """
        self.checkUnexpectedPacketRaised(method, *args, **kwargs)
    def checkBrokenNodeDisallowedErrorRaised(self, method, *args, **kwargs):
        """ Check if the BrokenNodeDisallowedError exception was raised """
        self.assertRaises(protocol.BrokenNodeDisallowedError, method, *args, **kwargs)
    def checkNotReadyErrorRaised(self, method, *args, **kwargs):
        """ Check if the NotReadyError exception was raised """
        self.assertRaises(protocol.NotReadyError, method, *args, **kwargs)
    def checkAborted(self, conn):
        """ Ensure the connection was aborted """
        self.assertEqual(len(conn.mockGetNamedCalls('abort')), 1)
    def checkNotAborted(self, conn):
        """ Ensure the connection was not aborted """
        self.assertEqual(len(conn.mockGetNamedCalls('abort')), 0)
    def checkClosed(self, conn):
        """ Ensure the connection was closed """
        self.assertEqual(len(conn.mockGetNamedCalls('close')), 1)
    def checkNotClosed(self, conn):
        """ Ensure the connection was not closed """
        self.assertEqual(len(conn.mockGetNamedCalls('close')), 0)
    def _checkNoPacketSend(self, conn, method_id):
        call_list = conn.mockGetNamedCalls(method_id)
        self.assertEqual(len(call_list), 0, call_list)
    def checkNoPacketSent(self, conn, check_notify=True, check_answer=True,
            check_ask=True):
        """ check if no packet were sent """
        if check_notify:
            self._checkNoPacketSend(conn, 'notify')
        if check_answer:
            self._checkNoPacketSend(conn, 'answer')
        if check_ask:
            self._checkNoPacketSend(conn, 'ask')
    def checkNoUUIDSet(self, conn):
        """ ensure no UUID was set on the connection """
        self.assertEqual(len(conn.mockGetNamedCalls('setUUID')), 0)
    def checkUUIDSet(self, conn, uuid=None, check_intermediate=True):
        """ ensure UUID was set on the connection """
        calls = conn.mockGetNamedCalls('setUUID')
        found_uuid = calls.pop().getParam(0)
        if check_intermediate:
            # Every intermediate setUUID call must carry the same value.
            for call in calls:
                self.assertEqual(found_uuid, call.getParam(0))
        if uuid is not None:
            self.assertEqual(found_uuid, uuid)
    # in check(Ask|Answer|Notify)Packet we return the packet so it can be used
    # in tests if more accurates checks are required
    def checkErrorPacket(self, conn, decode=False):
        """ Check if an error packet was answered """
        calls = conn.mockGetNamedCalls("answer")
        self.assertEqual(len(calls), 1)
        packet = calls.pop().getParam(0)
        self.assertTrue(isinstance(packet, protocol.Packet))
        self.assertEqual(type(packet), Packets.Error)
        if decode:
            return packet.decode()
        return packet
    def checkAskPacket(self, conn, packet_type, decode=False):
        """ Check if an ask-packet with the right type is sent """
        calls = conn.mockGetNamedCalls('ask')
        self.assertEqual(len(calls), 1)
        packet = calls.pop().getParam(0)
        self.assertTrue(isinstance(packet, protocol.Packet))
        self.assertEqual(type(packet), packet_type)
        if decode:
            return packet.decode()
        return packet
    def checkAnswerPacket(self, conn, packet_type, decode=False):
        """ Check if an answer-packet with the right type is sent """
        calls = conn.mockGetNamedCalls('answer')
        self.assertEqual(len(calls), 1)
        packet = calls.pop().getParam(0)
        self.assertTrue(isinstance(packet, protocol.Packet))
        self.assertEqual(type(packet), packet_type)
        if decode:
            return packet.decode()
        return packet
    def checkNotifyPacket(self, conn, packet_type, packet_number=0, decode=False):
        """ Check if a notify-packet with the right type is sent """
        calls = conn.mockGetNamedCalls('notify')
        packet = calls.pop(packet_number).getParam(0)
        self.assertTrue(isinstance(packet, protocol.Packet))
        self.assertEqual(type(packet), packet_type)
        if decode:
            return packet.decode()
        return packet
    # One thin wrapper per packet type, so tests read as intent
    # (checkAskObject(conn)) instead of generic packet-type checks.
    def checkNotify(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.Notify, **kw)
    def checkNotifyNodeInformation(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.NotifyNodeInformation, **kw)
    def checkSendPartitionTable(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.SendPartitionTable, **kw)
    def checkStartOperation(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.StartOperation, **kw)
    def checkInvalidateObjects(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.InvalidateObjects, **kw)
    def checkAbortTransaction(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.AbortTransaction, **kw)
    def checkNotifyLastOID(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.NotifyLastOID, **kw)
    def checkAnswerTransactionFinished(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerTransactionFinished, **kw)
    def checkAnswerInformationLocked(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerInformationLocked, **kw)
    def checkAskLockInformation(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskLockInformation, **kw)
    def checkNotifyUnlockInformation(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.NotifyUnlockInformation, **kw)
    def checkNotifyTransactionFinished(self, conn, **kw):
        return self.checkNotifyPacket(conn, Packets.NotifyTransactionFinished, **kw)
    def checkRequestIdentification(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.RequestIdentification, **kw)
    def checkAskPrimary(self, conn, **kw):
        # NOTE(review): **kw is accepted but not forwarded, unlike most
        # sibling helpers — confirm whether this is intentional.
        return self.checkAskPacket(conn, Packets.AskPrimary)
    def checkAskUnfinishedTransactions(self, conn, **kw):
        # NOTE(review): **kw is accepted but not forwarded here either.
        return self.checkAskPacket(conn, Packets.AskUnfinishedTransactions)
    def checkAskTransactionInformation(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskTransactionInformation, **kw)
    def checkAskObjectPresent(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskObjectPresent, **kw)
    def checkAskObject(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskObject, **kw)
    def checkAskStoreObject(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskStoreObject, **kw)
    def checkAskStoreTransaction(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskStoreTransaction, **kw)
    def checkAskFinishTransaction(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskFinishTransaction, **kw)
    def checkAskNewTid(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskBeginTransaction, **kw)
    def checkAskLastIDs(self, conn, **kw):
        return self.checkAskPacket(conn, Packets.AskLastIDs, **kw)
    def checkAcceptIdentification(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AcceptIdentification, **kw)
    def checkAnswerPrimary(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerPrimary, **kw)
    def checkAnswerLastIDs(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerLastIDs, **kw)
    def checkAnswerUnfinishedTransactions(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerUnfinishedTransactions, **kw)
    def checkAnswerObject(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerObject, **kw)
    def checkAnswerTransactionInformation(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerTransactionInformation, **kw)
    def checkAnswerBeginTransaction(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerBeginTransaction, **kw)
    def checkAnswerTids(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerTIDs, **kw)
    def checkAnswerTidsFrom(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerTIDsFrom, **kw)
    def checkAnswerObjectHistory(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerObjectHistory, **kw)
    def checkAnswerStoreTransaction(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerStoreTransaction, **kw)
    def checkAnswerStoreObject(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerStoreObject, **kw)
    def checkAnswerPartitionTable(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerPartitionTable, **kw)
    def checkAnswerObjectPresent(self, conn, **kw):
        return self.checkAnswerPacket(conn, Packets.AnswerObjectPresent, **kw)
class Patch(object):
    """Guard / context manager that temporarily replaces one attribute.

    Usage: Patch(obj, name=replacement). When *replacement* is callable it
    is installed as a wrapper: it is invoked as
    replacement(original_value, *args, **kw).
    """
    # Class-level default; an instance attribute shadows it while applied.
    applied = False
    def __init__(self, patched, **patch):
        # Exactly one keyword argument is expected: its name is the
        # attribute to patch, its value the replacement.
        (name, patch), = patch.iteritems()
        self._patched = patched
        self._name = name
        if callable(patch):
            wrapped = getattr(patched, name, None)
            func = patch
            # Prepend the original value to the wrapper's arguments.
            patch = lambda *args, **kw: func(wrapped, *args, **kw)
            if callable(wrapped):
                patch = wraps(wrapped)(patch)
        self._patch = patch
        try:
            # Look in __dict__ (not getattr) so revert restores the exact
            # prior state: either the own attribute, or its absence.
            orig = patched.__dict__[name]
            self._revert = lambda: setattr(patched, name, orig)
        except KeyError:
            self._revert = lambda: delattr(patched, name)
    def apply(self):
        assert not self.applied
        setattr(self._patched, self._name, self._patch)
        self.applied = True
    def revert(self):
        # Deleting the instance attribute re-exposes the class default
        # (False), flagging the patch as no longer applied.
        del self.applied
        self._revert()
    def __del__(self):
        # Safety net: revert automatically on garbage collection.
        if self.applied:
            self.revert()
    def __enter__(self):
        self.apply()
        # Return a weak proxy so keeping the context variable alive does
        # not delay __del__-based reversion.
        return weakref.proxy(self)
    def __exit__(self, t, v, tb):
        self.__del__()
# Convenience: make pdb() available everywhere in tests, breaking into the
# debugger at the caller's frame ('depth' skips additional frames).
__builtin__.pdb = lambda depth=0: \
    debug.getPdb().set_trace(sys._getframe(depth+1))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/benchmark.py 0000664 0000000 0000000 00000007146 12601037530 0025333 0 ustar 00root root 0000000 0000000
import sys
import smtplib
import optparse
import platform
import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Default SMTP relay ("host:port") used when --mail-server is not given.
MAIL_SERVER = '127.0.0.1:25'
from neo.lib import logging
# NOTE(review): backlog() presumably buffers log records in memory —
# see neo.lib.logging for the exact semantics.
logging.backlog()
class AttributeDict(dict):
    """dict whose keys are also readable as attributes (e.g. config.title).

    Missing keys surface as AttributeError, as required by the attribute
    protocol, so getattr(d, 'x', default) and hasattr(d, 'x') work.
    """
    def __getattr__(self, item):
        try:
            return self.__getitem__(item)
        except KeyError:
            # A bare KeyError here would break getattr()/hasattr(), which
            # only catch AttributeError.
            raise AttributeError(item)
class BenchmarkRunner(object):
    """
    Base class for a command-line benchmark test runner.

    Subclasses implement add_options/load_options/start; run() executes
    the benchmark, prints the report and optionally mails it.
    """
    def __init__(self):
        self._successful = True
        # Extra (key, value) lines appended to the report header.
        self._status = []
        parser = optparse.OptionParser()
        # register common options
        parser.add_option('', '--title')
        parser.add_option('', '--mail-to', action='append')
        parser.add_option('', '--mail-from')
        parser.add_option('', '--mail-server')
        parser.add_option('', '--repeat', type='int', default=1)
        self.add_options(parser)
        # check common arguments
        options, self._args = parser.parse_args()
        # Mailing requires both a sender and at least one recipient.
        if bool(options.mail_to) ^ bool(options.mail_from):
            sys.exit('Need a sender and recipients to mail report')
        mail_server = options.mail_server or MAIL_SERVER
        # check specifics arguments
        self._config = AttributeDict()
        self._config.update(self.load_options(options, self._args))
        self._config.update(
            title = options.title or self.__class__.__name__,
            mail_from = options.mail_from,
            mail_to = options.mail_to,
            mail_server = mail_server.split(':'),
            repeat = options.repeat,
        )
    def add_status(self, key, value):
        """Append a (key, value) line to the report header."""
        self._status.append((key, value))
    def build_report(self, content):
        """Prefix *content* with a header describing the run environment."""
        fmt = "%-25s : %s"
        status = "\n".join([fmt % item for item in [
            ('Title', self._config.title),
            ('Date', datetime.date.today().isoformat()),
            ('Node', platform.node()),
            ('Machine', platform.machine()),
            ('System', platform.system()),
            ('Python', platform.python_version()),
        ]])
        status += '\n\n'
        status += "\n".join([fmt % item for item in self._status])
        return "%s\n\n%s" % (status, content)
    def send_report(self, subject, report):
        """Mail *report* to every configured recipient, one at a time."""
        # build email
        msg = MIMEMultipart()
        msg['Subject'] = '%s: %s' % (self._config.title, subject)
        msg['From'] = self._config.mail_from
        msg['To'] = ', '.join(self._config.mail_to)
        msg['X-ERP5-Tests'] = 'NEO'
        if self._successful:
            msg['X-ERP5-Tests-Status'] = 'OK'
        msg.epilogue = ''
        msg.attach(MIMEText(report))
        # send it
        s = smtplib.SMTP()
        s.connect(*self._config.mail_server)
        mail = msg.as_string()
        # Send per-recipient so one refused address does not block the rest.
        for recipient in self._config.mail_to:
            try:
                s.sendmail(self._config.mail_from, recipient, mail)
            except smtplib.SMTPRecipientsRefused:
                print "Mail for %s fails" % recipient
        s.close()
    def run(self):
        """Run the benchmark, print the report, and mail it if configured."""
        subject, report = self.start()
        report = self.build_report(report)
        if self._config.mail_to:
            self.send_report(subject, report)
        print subject
        print
        print report
    def was_successful(self):
        return self._successful
    def add_options(self, parser):
        """ Append options to command line parser """
        raise NotImplementedError
    def load_options(self, options, args):
        """ Check options and return a configuration dict """
        raise NotImplementedError
    def start(self):
        """ Run the test """
        raise NotImplementedError
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/client/ 0000775 0000000 0000000 00000000000 12601037530 0024275 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/client/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0026374 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/client/testClientApp.py 0000664 0000000 0000000 00000104213 12601037530 0027427 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import threading
import unittest
from cPickle import dumps
from mock import Mock, ReturnValues
from ZODB.POSException import StorageTransactionError, UndoError, ConflictError
from .. import NeoUnitTestBase, buildUrlFromString, ADDRESS_TYPE
from neo.client.app import Application
from neo.client.cache import test as testCache
from neo.client.exception import NEOStorageError, NEOStorageNotFoundError
from neo.lib.protocol import NodeTypes, Packets, Errors, \
INVALID_PARTITION, UUID_NAMESPACES
from neo.lib.util import makeChecksum
import time
class Dispatcher(object):
    """Minimal fake of the client dispatcher: just enough for these tests."""
    def pending(self, queue):
        # Anything still queued means requests are pending.
        if queue.empty():
            return False
        return True
    def forget_queue(self, queue, flush_queue=True):
        # Nothing to clean up in this fake.
        pass
def _getMasterConnection(self):
    """Monkey-patch replacement for Application._getMasterConnection:
    fake a successful master handshake on first use, then return the
    cached mock connection."""
    if self.master_conn is not None:
        return self.master_conn
    # First call: pretend identification succeeded.
    self.uuid = 1 + (UUID_NAMESPACES[NodeTypes.CLIENT] << 24)
    self.num_partitions = 10
    self.num_replicas = 1
    self.pt = Mock({'getCellList': ()})
    self.master_conn = Mock()
    return self.master_conn
def getConnection(kw):
    """Return a Mock connection whose .lock is a real reentrant lock."""
    connection = Mock(kw)
    connection.lock = threading.RLock()
    return connection
def _ask(self, conn, packet, handler=None, **kw):
self.setHandlerData(None)
conn.ask(packet, **kw)
if handler is None:
raise NotImplementedError
else:
handler.dispatch(conn, conn.fakeReceived())
return self.getHandlerData()
def resolving_tryToResolveConflict(oid, conflict_serial, serial, data):
    """Conflict resolver stub that always succeeds: keep the data as-is."""
    resolved = data
    return resolved
def failing_tryToResolveConflict(oid, conflict_serial, serial, data):
    """Conflict resolver stub that always fails with ConflictError."""
    raise ConflictError
class ClientApplicationTests(NeoUnitTestBase):
    def setUp(self):
        NeoUnitTestBase.setUp(self)
        # apply monkey patches: save the real Application methods so
        # _tearDown can restore them, then install the module-level fakes.
        self._getMasterConnection = Application._getMasterConnection
        self._ask = Application._ask
        Application._getMasterConnection = _getMasterConnection
        Application._ask = _ask
        # Applications created by getApp(), to be closed in _tearDown.
        self._to_stop_list = []
    def _tearDown(self, success):
        # stop threads of every Application created during the test
        for app in self._to_stop_list:
            app.close()
        # restore environment: undo the monkey patches applied in setUp
        Application._ask = self._ask
        Application._getMasterConnection = self._getMasterConnection
        NeoUnitTestBase._tearDown(self, success)
# some helpers
def _begin(self, app, txn, tid=None):
txn_context = app._txn_container.new(txn)
if tid is None:
tid = self.makeTID()
txn_context['ttid'] = tid
return txn_context
def getApp(self, master_nodes=None, name='test', **kw):
if master_nodes is None:
master_nodes = '%s:10010' % buildUrlFromString(self.local_ip)
app = Application(master_nodes, name, **kw)
self._to_stop_list.append(app)
app.dispatcher = Mock({ })
return app
def getConnectionPool(self, conn_list):
return Mock({
'iterateForObject': conn_list,
})
def makeOID(self, value=None):
from random import randint
if value is None:
value = randint(1, 255)
return '\00' * 7 + chr(value)
makeTID = makeOID
    def getNodeCellConn(self, index=1, address=('127.0.0.1', 10000), uuid=None):
        """Return a wired-up (node, cell, connection) triple of mocks:
        cell.getNode() -> node, node.getConnection() -> conn."""
        conn = getConnection({
            'getAddress': address,
            '__repr__': 'connection mock',
            'getUUID': uuid,
        })
        node = Mock({
            '__repr__': 'node%s' % index,
            '__hash__': index,
            'getConnection': conn,
        })
        cell = Mock({
            'getAddress': 'FakeServer',
            'getState': 'FakeState',
            'getNode': node,
        })
        return (node, cell, conn)
def makeTransactionObject(self, user='u', description='d', _extension='e'):
class Transaction(object):
pass
txn = Transaction()
txn.user = user
txn.description = description
txn._extension = _extension
return txn
    def beginTransaction(self, app, tid):
        """Open a transaction on *app*, faking the master's
        AnswerBeginTransaction reply with the given tid, and return the
        transaction object."""
        packet = Packets.AnswerBeginTransaction(tid=tid)
        packet.setId(0)
        app.master_conn = Mock({ 'fakeReceived': packet, })
        txn = self.makeTransactionObject()
        app.tpc_begin(txn, tid=tid)
        return txn
# common checks
    def checkDispatcherRegisterCalled(self, app, conn):
        # NOTE(review): the actual assertions are disabled below; this
        # currently only fetches the call list without checking it.
        calls = app.dispatcher.mockGetNamedCalls('register')
        #self.assertEqual(len(calls), 1)
        #self.assertEqual(calls[0].getParam(0), conn)
        #self.assertTrue(isinstance(calls[0].getParam(2), Queue))
    # Re-export the client cache self-test as a test method of this class.
    testCache = testCache
def test_registerDB(self):
app = self.getApp()
dummy_db = []
app.registerDB(dummy_db, None)
self.assertTrue(app.getDB() is dummy_db)
    def test_new_oid(self):
        """new_oid() consumes one OID from the list answered by the master
        and keeps the remaining ones in app.new_oid_list."""
        app = self.getApp()
        test_msg_id = 50
        test_oid_list = ['\x00\x00\x00\x00\x00\x00\x00\x01', '\x00\x00\x00\x00\x00\x00\x00\x02']
        response_packet = Packets.AnswerNewOIDs(test_oid_list[:])
        response_packet.setId(0)
        app.master_conn = Mock({'getNextId': test_msg_id, '_addPacket': None,
            'expectMessage': None,
            # Test-specific method
            'fakeReceived': response_packet})
        new_oid = app.new_oid()
        self.assertTrue(new_oid in test_oid_list)
        self.assertEqual(len(app.new_oid_list), 1)
        self.assertTrue(app.new_oid_list[0] in test_oid_list)
        # the OID still pending must not be the one just returned
        self.assertNotEqual(app.new_oid_list[0], new_oid)
    def test_load(self):
        """Exercise load() against a faked storage connection.

        Covers: OidNotFound -> NEOStorageNotFoundError, loads at and
        before given tids (served from cache once stored), and cache
        invalidation forcing a new (here exhausted) network request.
        """
        app = self.getApp()
        cache = app._cache
        oid = self.makeOID()
        tid1 = self.makeTID(1)
        tid2 = self.makeTID(2)
        tid3 = self.makeTID(3)
        tid4 = self.makeTID(4)
        # connection to SN close
        self.assertFalse(oid in cache._oid_dict)
        conn = Mock({'getAddress': ('', 0)})
        app.cp = Mock({'iterateForObject': [(Mock(), conn)]})
        def fakeReceived(packet):
            # queue exactly one answer packet; a second receive raises
            # StopIteration (used below to detect unexpected requests)
            packet.setId(0)
            conn.fakeReceived = iter((packet,)).next
        def fakeObject(oid, serial, next_serial, data):
            fakeReceived(Packets.AnswerObject(oid, serial, next_serial, 0,
                makeChecksum(data), data, None))
            return data, serial, next_serial
        fakeReceived(Errors.OidNotFound(''))
        #Application._waitMessage = self._waitMessage
        # XXX: test disabled because of an infinite loop
        # self.assertRaises(NEOStorageError, app.load, oid, None, tid2)
        # self.checkAskObject(conn)
        #Application._waitMessage = _waitMessage
        # object not found in NEO -> NEOStorageNotFoundError
        self.assertFalse(oid in cache._oid_dict)
        fakeReceived(Errors.OidNotFound(''))
        self.assertRaises(NEOStorageNotFoundError, app.load, oid)
        self.checkAskObject(conn)
        r1 = fakeObject(oid, tid1, tid3, 'FOO')
        self.assertEqual(r1, app.load(oid, None, tid2))
        self.checkAskObject(conn)
        for t in tid2, tid3:
            self.assertEqual(cache._load(oid, t).tid, tid1)
        self.assertEqual(r1, app.load(oid, tid1))
        self.assertEqual(r1, app.load(oid, None, tid3))
        self.assertRaises(StandardError, app.load, oid, tid2)
        # fake answers exhausted -> StopIteration proves a request was made
        self.assertRaises(StopIteration, app.load, oid)
        self.checkAskObject(conn)
        r2 = fakeObject(oid, tid3, None, 'BAR')
        self.assertEqual(r2, app.load(oid, None, tid4))
        self.checkAskObject(conn)
        self.assertEqual(r2, app.load(oid))
        self.assertEqual(r2, app.load(oid, tid3))
        cache.invalidate(oid, tid4)
        self.assertRaises(StopIteration, app.load, oid)
        self.checkAskObject(conn)
        self.assertEqual(len(cache._oid_dict[oid]), 2)
    def test_tpc_begin(self):
        """tpc_begin stores the supplied tid (or asks the master for one)
        in the transaction context, and refuses to begin twice."""
        app = self.getApp()
        tid = self.makeTID()
        txn = Mock()
        # first, tid is supplied
        self.assertRaises(StorageTransactionError, app._txn_container.get, txn)
        packet = Packets.AnswerBeginTransaction(tid=tid)
        packet.setId(0)
        app.master_conn = Mock({
            'getNextId': 1,
            'fakeReceived': packet,
        })
        app.tpc_begin(transaction=txn, tid=tid)
        txn_context = app._txn_container.get(txn)
        self.assertTrue(txn_context['txn'] is txn)
        self.assertEqual(txn_context['ttid'], tid)
        # next, the transaction already begin -> raise
        self.assertRaises(StorageTransactionError, app.tpc_begin,
            transaction=txn, tid=None)
        txn_context = app._txn_container.get(txn)
        self.assertTrue(txn_context['txn'] is txn)
        self.assertEqual(txn_context['ttid'], tid)
        # start a transaction without tid
        txn = Mock()
        # no connection -> NEOStorageError (wait until connected to primary)
        #self.assertRaises(NEOStorageError, app.tpc_begin, transaction=txn, tid=None)
        # ask a tid to pmn
        packet = Packets.AnswerBeginTransaction(tid=tid)
        packet.setId(0)
        app.master_conn = Mock({
            'getNextId': 1,
            'fakeReceived': packet,
        })
        app.tpc_begin(transaction=txn, tid=None)
        self.checkAskNewTid(app.master_conn)
        self.checkDispatcherRegisterCalled(app, app.master_conn)
        # check attributes
        txn_context = app._txn_container.get(txn)
        self.assertTrue(txn_context['txn'] is txn)
        self.assertEqual(txn_context['ttid'], tid)
    def test_store1(self):
        """store() fails on an unknown transaction and on an empty cell list."""
        app = self.getApp()
        oid = self.makeOID(11)
        tid = self.makeTID()
        txn = self.makeTransactionObject()
        # invalid transaction -> StorageTransactionError
        self.assertRaises(StorageTransactionError, app.store, oid, tid, '',
            None, txn)
        # check partition_id and an empty cell list -> NEOStorageError
        self._begin(app, txn, self.makeTID())
        app.pt = Mock({'getCellList': ()})
        app.num_partitions = 2
        self.assertRaises(NEOStorageError, app.store, oid, tid, '', None,
            txn)
        calls = app.pt.mockGetNamedCalls('getCellList')
        self.assertEqual(len(calls), 1)
    def test_store2(self):
        """A conflicting store answer makes waitStoreResponses raise
        ConflictError and drops the pending data for that oid."""
        app = self.getApp()
        oid = self.makeOID(11)
        tid = self.makeTID()
        txn = self.makeTransactionObject()
        # build conflicting state
        txn_context = self._begin(app, txn, tid)
        packet = Packets.AnswerStoreObject(conflicting=1, oid=oid, serial=tid)
        packet.setId(0)
        storage_address = ('127.0.0.1', 10020)
        node, cell, conn = self.getNodeCellConn(address=storage_address)
        app.pt = Mock()
        app.cp = self.getConnectionPool([(node, conn)])
        app.dispatcher = Dispatcher()
        app.nm.createStorage(address=storage_address)
        data_dict = txn_context['data_dict']
        data_dict[oid] = 'BEFORE'
        app.store(oid, tid, '', None, txn)
        # deliver the conflicting answer as if it came from the storage
        txn_context['queue'].put((conn, packet, {}))
        self.assertRaises(ConflictError, app.waitStoreResponses, txn_context,
            failing_tryToResolveConflict)
        self.assertTrue(oid not in data_dict)
        self.assertEqual(txn_context['object_stored_counter_dict'][oid], {})
        self.checkAskStoreObject(conn)
    def test_store3(self):
        """A successful store records the answering uuid per serial and
        moves the data from data_dict to cache_dict."""
        app = self.getApp()
        uuid = self.getStorageUUID()
        oid = self.makeOID(11)
        tid = self.makeTID()
        txn = self.makeTransactionObject()
        # case with no conflict
        txn_context = self._begin(app, txn, tid)
        packet = Packets.AnswerStoreObject(conflicting=0, oid=oid, serial=tid)
        packet.setId(0)
        storage_address = ('127.0.0.1', 10020)
        node, cell, conn = self.getNodeCellConn(address=storage_address,
            uuid=uuid)
        app.cp = self.getConnectionPool([(node, conn)])
        app.pt = Mock()
        app.dispatcher = Dispatcher()
        app.nm.createStorage(address=storage_address)
        app.store(oid, tid, 'DATA', None, txn)
        self.checkAskStoreObject(conn)
        txn_context['queue'].put((conn, packet, {}))
        app.waitStoreResponses(txn_context, resolving_tryToResolveConflict)
        self.assertEqual(txn_context['object_stored_counter_dict'][oid],
            {tid: {uuid}})
        self.assertEqual(txn_context['cache_dict'][oid], 'DATA')
        self.assertFalse(oid in txn_context['data_dict'])
        self.assertFalse(oid in txn_context['conflict_serial_dict'])
def test_tpc_vote1(self):
app = self.getApp()
txn = self.makeTransactionObject()
# invalid transaction > StorageTransactionError
self.assertRaises(StorageTransactionError, app.tpc_vote, txn,
resolving_tryToResolveConflict)
    def test_tpc_vote3(self):
        """Voting with a successful AnswerStoreTransaction completes and
        asks the storage to store the transaction."""
        app = self.getApp()
        tid = self.makeTID()
        txn = self.makeTransactionObject()
        self._begin(app, txn, tid)
        # response -> OK
        packet = Packets.AnswerStoreTransaction(tid=tid)
        packet.setId(0)
        conn = Mock({
            'getNextId': 1,
            'fakeReceived': packet,
        })
        node = Mock({
            '__hash__': 1,
            '__repr__': 'FakeNode',
        })
        app.cp = self.getConnectionPool([(node, conn)])
        app.tpc_vote(txn, resolving_tryToResolveConflict)
        self.checkAskStoreTransaction(conn)
        self.checkDispatcherRegisterCalled(app, conn)
    def test_tpc_abort1(self):
        """Aborting an unknown transaction sends nothing and leaves the
        other (begun) transaction untouched."""
        # ignore mismatch transaction
        app = self.getApp()
        tid = self.makeTID()
        txn = self.makeTransactionObject()
        old_txn = object()
        self._begin(app, old_txn, tid)
        app.master_conn = Mock()
        conn = Mock()
        cell = Mock()
        app.cp = Mock({'getConnForCell': ReturnValues(None, cell)})
        app.tpc_abort(txn)
        # no packet sent
        self.checkNoPacketSent(conn)
        self.checkNoPacketSent(app.master_conn)
        txn_context = app._txn_container.get(old_txn)
        self.assertTrue(txn_context['txn'] is old_txn)
        self.assertEqual(txn_context['ttid'], tid)
    def test_tpc_abort2(self):
        """Abort notifies each involved storage exactly once, plus the
        master, and clears the transaction context."""
        # 2 nodes : 1 transaction in the first, 2 objects in the second
        # connections to each node should received only one packet to abort
        # and transaction must also be aborted on the master node
        # for simplicity, just one cell per partition
        oid1, oid2 = self.makeOID(2), self.makeOID(4) # on partition 0
        app, tid = self.getApp(), self.makeTID(1) # on partition 1
        txn = self.makeTransactionObject()
        txn_context = self._begin(app, txn, tid)
        app.master_conn = Mock({'__hash__': 0})
        app.num_partitions = 2
        cell1 = Mock({ 'getNode': 'NODE1', '__hash__': 1 })
        cell2 = Mock({ 'getNode': 'NODE2', '__hash__': 2 })
        conn1, conn2 = Mock({ 'getNextId': 1, }), Mock({ 'getNextId': 2, })
        app.cp = Mock({ 'getConnForNode': ReturnValues(conn1, conn2), })
        # fake data
        txn_context['involved_nodes'].update([cell1, cell2])
        app.tpc_abort(txn)
        # will check if there was just one call/packet :
        self.checkNotifyPacket(conn1, Packets.AbortTransaction)
        self.checkNotifyPacket(conn2, Packets.AbortTransaction)
        self.checkNotifyPacket(app.master_conn, Packets.AbortTransaction)
        self.assertRaises(StorageTransactionError, app._txn_container.get, txn)
def test_tpc_abort3(self):
""" check that abort is sent to all nodes involved in the transaction """
app = self.getApp()
# three partitions/storages: one per object/transaction
app.num_partitions = num_partitions = 3
app.num_replicas = 0
tid = self.makeTID(num_partitions) # on partition 0
oid1 = self.makeOID(num_partitions + 1) # on partition 1, conflicting
oid2 = self.makeOID(num_partitions + 2) # on partition 2
# storage nodes
address1 = ('127.0.0.1', 10000); uuid1 = self.getMasterUUID()
address2 = ('127.0.0.1', 10001); uuid2 = self.getStorageUUID()
address3 = ('127.0.0.1', 10002); uuid3 = self.getStorageUUID()
app.nm.createMaster(address=address1, uuid=uuid1)
app.nm.createStorage(address=address2, uuid=uuid2)
app.nm.createStorage(address=address3, uuid=uuid3)
# answer packets
packet1 = Packets.AnswerStoreTransaction(tid=tid)
packet2 = Packets.AnswerStoreObject(conflicting=1, oid=oid1, serial=tid)
packet3 = Packets.AnswerStoreObject(conflicting=0, oid=oid2, serial=tid)
[p.setId(i) for p, i in zip([packet1, packet2, packet3], range(3))]
conn1 = getConnection({'__repr__': 'conn1', 'getAddress': address1,
'fakeReceived': packet1, 'getUUID': uuid1})
conn2 = getConnection({'__repr__': 'conn2', 'getAddress': address2,
'fakeReceived': packet2, 'getUUID': uuid2})
conn3 = getConnection({'__repr__': 'conn3', 'getAddress': address3,
'fakeReceived': packet3, 'getUUID': uuid3})
node1 = Mock({'__repr__': 'node1', '__hash__': 1, 'getConnection': conn1})
node2 = Mock({'__repr__': 'node2', '__hash__': 2, 'getConnection': conn2})
node3 = Mock({'__repr__': 'node3', '__hash__': 3, 'getConnection': conn3})
# fake environment
app.cp = Mock({'getConnForCell': ReturnValues(conn2, conn3, conn1)})
app.cp = Mock({
'getConnForNode': ReturnValues(conn2, conn3, conn1),
'iterateForObject': [(node2, conn2), (node3, conn3), (node1, conn1)],
})
app.master_conn = Mock({'__hash__': 0})
txn = self.makeTransactionObject()
txn_context = self._begin(app, txn, tid)
app.dispatcher = Dispatcher()
# conflict occurs on storage 2
app.store(oid1, tid, 'DATA', None, txn)
app.store(oid2, tid, 'DATA', None, txn)
queue = txn_context['queue']
queue.put((conn2, packet2, {}))
queue.put((conn3, packet3, {}))
# vote fails as the conflict is not resolved, nothing is sent to storage 3
self.assertRaises(ConflictError, app.tpc_vote, txn, failing_tryToResolveConflict)
# abort must be sent to storage 1 and 2
app.tpc_abort(txn)
self.checkAbortTransaction(conn2)
self.checkAbortTransaction(conn3)
def test_tpc_finish1(self):
# transaction mismatch: raise
app = self.getApp()
txn = self.makeTransactionObject()
app.master_conn = Mock()
self.assertRaises(StorageTransactionError, app.tpc_finish, txn, None)
# no packet sent
self.checkNoPacketSent(app.master_conn)
    def test_tpc_finish3(self):
        """After a successful vote, tpc_finish asks the master to finish
        and the transaction context is cleared."""
        # transaction is finished
        app = self.getApp()
        tid = self.makeTID()
        ttid = self.makeTID()
        txn = self.makeTransactionObject()
        txn_context = self._begin(app, txn, tid)
        self.f_called = False
        self.f_called_with_tid = None
        packet = Packets.AnswerTransactionFinished(ttid, tid)
        packet.setId(0)
        app.master_conn = Mock({
            'getNextId': 1,
            'getAddress': ('127.0.0.1', 10010),
            'fakeReceived': packet,
        })
        # mark the vote phase as done so tpc_finish accepts the transaction
        txn_context['voted'] = None
        app.tpc_finish(txn, None)
        self.checkAskFinishTransaction(app.master_conn)
        #self.checkDispatcherRegisterCalled(app, app.master_conn)
        self.assertRaises(StorageTransactionError, app._txn_container.get, txn)
def test_undo1(self):
# invalid transaction
app = self.getApp()
tid = self.makeTID()
txn = self.makeTransactionObject()
app.master_conn = Mock()
conn = Mock()
self.assertRaises(StorageTransactionError, app.undo, tid,
txn, failing_tryToResolveConflict)
# no packet sent
self.checkNoPacketSent(conn)
self.checkNoPacketSent(app.master_conn)
    def _getAppForUndoTests(self, oid0, tid0, tid1, tid2):
        """Build an app wired for undo tests.

        The app's load() returns 'dummy'/'cdummy' data for tid0/tid2 and
        _store() only records its arguments in the returned *store_marker*
        list.  Returns (app, conn, store_marker).
        """
        app = self.getApp()
        cell = Mock({
            'getAddress': 'FakeServer',
            'getState': 'FakeState',
        })
        app.pt = Mock({'getCellList': [cell]})
        transaction_info = Packets.AnswerTransactionInformation(tid1, '', '',
            '', False, (oid0, ))
        transaction_info.setId(1)
        conn = getConnection({
            'getNextId': 1,
            'fakeReceived': transaction_info,
            'getAddress': ('127.0.0.1', 10020),
        })
        node = app.nm.createStorage(address=conn.getAddress())
        app.cp = Mock({
            'iterateForObject': [(node, conn)],
            'getConnForCell': conn,
        })
        app.dispatcher = Dispatcher()
        def load(oid, tid=None, before_tid=None):
            # only oid0 is expected; KeyError on any other tid is deliberate
            self.assertEqual(oid, oid0)
            return ({tid0: 'dummy', tid2: 'cdummy'}[tid], None, None)
        app.load = load
        store_marker = []
        def _store(txn_context, oid, serial, data, data_serial=None,
                unlock=False):
            store_marker.append((oid, serial, data, data_serial))
        app._store = _store
        app.last_tid = self.getNextTID()
        return app, conn, store_marker
    def test_undoWithResolutionSuccess(self):
        """
        Try undoing transaction tid1, which contains object oid.
        Object oid previous revision before tid1 is tid0.
        Transaction tid2 modified oid (and contains its data).
        Undo is accepted, because conflict resolution succeeds.
        """
        oid0 = self.makeOID(1)
        tid0 = self.getNextTID()
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        tid3 = self.getNextTID()
        app, conn, store_marker = self._getAppForUndoTests(oid0, tid0, tid1,
            tid2)
        undo_serial = Packets.AnswerObjectUndoSerial({
            oid0: (tid2, tid0, False)})
        # answer AskObjectUndoSerial directly by pushing onto the queue
        conn.ask = lambda p, queue=None, **kw: \
            isinstance(p, Packets.AskObjectUndoSerial) and \
            queue.put((conn, undo_serial, kw))
        undo_serial.setId(2)
        marker = []
        def tryToResolveConflict(oid, conflict_serial, serial, data,
                committedData=''):
            # record the resolution request and pretend it succeeded
            marker.append((oid, conflict_serial, serial, data, committedData))
            return 'solved'
        # The undo
        txn = self.beginTransaction(app, tid=tid3)
        app.undo(tid1, txn, tryToResolveConflict)
        # Checking what happened
        moid, mconflict_serial, mserial, mdata, mcommittedData = marker[0]
        self.assertEqual(moid, oid0)
        self.assertEqual(mconflict_serial, tid2)
        self.assertEqual(mserial, tid1)
        self.assertEqual(mdata, 'dummy')
        self.assertEqual(mcommittedData, 'cdummy')
        moid, mserial, mdata, mdata_serial = store_marker[0]
        self.assertEqual(moid, oid0)
        self.assertEqual(mserial, tid2)
        self.assertEqual(mdata, 'solved')
        self.assertEqual(mdata_serial, None)
    def test_undoWithResolutionFailure(self):
        """
        Try undoing transaction tid1, which contains object oid.
        Object oid previous revision before tid1 is tid0.
        Transaction tid2 modified oid (and contains its data).
        Undo is rejected with a raise, because conflict resolution fails.
        """
        oid0 = self.makeOID(1)
        tid0 = self.getNextTID()
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        tid3 = self.getNextTID()
        undo_serial = Packets.AnswerObjectUndoSerial({
            oid0: (tid2, tid0, False)})
        undo_serial.setId(2)
        app, conn, store_marker = self._getAppForUndoTests(oid0, tid0, tid1,
            tid2)
        # answer AskObjectUndoSerial directly by pushing onto the queue
        conn.ask = lambda p, queue=None, **kw: \
            type(p) is Packets.AskObjectUndoSerial and \
            queue.put((conn, undo_serial, kw))
        marker = []
        def tryToResolveConflict(oid, conflict_serial, serial, data,
                committedData=''):
            marker.append((oid, conflict_serial, serial, data, committedData))
            raise ConflictError
        # The undo
        txn = self.beginTransaction(app, tid=tid3)
        self.assertRaises(UndoError, app.undo, tid1, txn, tryToResolveConflict)
        # Checking what happened
        moid, mconflict_serial, mserial, mdata, mcommittedData = marker[0]
        self.assertEqual(moid, oid0)
        self.assertEqual(mconflict_serial, tid2)
        self.assertEqual(mserial, tid1)
        self.assertEqual(mdata, 'dummy')
        self.assertEqual(mcommittedData, 'cdummy')
        self.assertEqual(len(store_marker), 0)
        # Likewise, but conflict resolver raises a ConflictError.
        # Still, exception raised by undo() must be UndoError.
        # NOTE(review): this second scenario is identical to the first one
        # above (same resolver, same exception) -- possibly a leftover copy.
        marker = []
        def tryToResolveConflict(oid, conflict_serial, serial, data,
                committedData=''):
            marker.append((oid, conflict_serial, serial, data, committedData))
            raise ConflictError
        # The undo
        self.assertRaises(UndoError, app.undo, tid1, txn, tryToResolveConflict)
        # Checking what happened
        moid, mconflict_serial, mserial, mdata, mcommittedData = marker[0]
        self.assertEqual(moid, oid0)
        self.assertEqual(mconflict_serial, tid2)
        self.assertEqual(mserial, tid1)
        self.assertEqual(mdata, 'dummy')
        self.assertEqual(mcommittedData, 'cdummy')
        self.assertEqual(len(store_marker), 0)
    def test_undo(self):
        """
        Try undoing transaction tid1, which contains object oid.
        Object oid previous revision before tid1 is tid0.
        Undo is accepted, because tid1 is object's current revision.
        """
        oid0 = self.makeOID(1)
        tid0 = self.getNextTID()
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        tid3 = self.getNextTID()
        transaction_info = Packets.AnswerTransactionInformation(tid1, '', '',
            '', False, (oid0, ))
        transaction_info.setId(1)
        # current_serial == tid1, so the storage accepts the undo directly
        undo_serial = Packets.AnswerObjectUndoSerial({
            oid0: (tid1, tid0, True)})
        undo_serial.setId(2)
        app, conn, store_marker = self._getAppForUndoTests(oid0, tid0, tid1,
            tid2)
        conn.ask = lambda p, queue=None, **kw: \
            type(p) is Packets.AskObjectUndoSerial and \
            queue.put((conn, undo_serial, kw))
        def tryToResolveConflict(oid, conflict_serial, serial, data,
                committedData=''):
            # no conflict is expected in this scenario
            raise Exception, 'Test called conflict resolution, but there ' \
                'is no conflict in this test !'
        # The undo
        txn = self.beginTransaction(app, tid=tid3)
        app.undo(tid1, txn, tryToResolveConflict)
        # Checking what happened
        moid, mserial, mdata, mdata_serial = store_marker[0]
        self.assertEqual(moid, oid0)
        self.assertEqual(mserial, tid1)
        self.assertEqual(mdata, None)
        self.assertEqual(mdata_serial, tid0)
    def test_undoLog(self):
        """undoLog() queries every storage for TIDs and returns the merged,
        filtered transaction descriptions."""
        app = self.getApp()
        app.num_partitions = 2
        uuid1, uuid2 = self.getStorageUUID(), self.getStorageUUID()
        # two nodes, two partition, two transaction, two objects :
        tid1, tid2 = self.makeTID(1), self.makeTID(2)
        oid1, oid2 = self.makeOID(1), self.makeOID(2)
        # TIDs packets supplied by _ask hook
        # TXN info packets
        extension = dumps({})
        p1 = Packets.AnswerTIDs([tid1])
        p2 = Packets.AnswerTIDs([tid2])
        p3 = Packets.AnswerTransactionInformation(tid1, '', '',
            extension, False, (oid1, ))
        p4 = Packets.AnswerTransactionInformation(tid2, '', '',
            extension, False, (oid2, ))
        p1.setId(0)
        p2.setId(1)
        p3.setId(2)
        p4.setId(3)
        conn = Mock({
            'getNextId': 1,
            'getUUID': ReturnValues(uuid1, uuid2),
            'fakeGetApp': app,
            'fakeReceived': ReturnValues(p3, p4),
            'getAddress': ('127.0.0.1', 10021),
        })
        asked = []
        def answerTIDs(packet):
            # build a per-storage connection that answers *packet* to any
            # ask() and records the asked packet for later inspection
            conn = getConnection({'getAddress': packet})
            app.nm.createStorage(address=conn.getAddress())
            def ask(p, queue, **kw):
                asked.append(p)
                queue.put((conn, packet, kw))
            conn.ask = ask
            return conn
        app.dispatcher = Dispatcher()
        app.pt = Mock({
            'getNodeSet': (Mock(), Mock()),
        })
        app.cp = Mock({
            'getConnForNode': ReturnValues(answerTIDs(p1), answerTIDs(p2)),
            'iterateForObject': [(Mock(), conn)]
        })
        def txn_filter(info):
            return info['id'] > '\x00' * 8
        first = 0
        last = 4
        result = app.undoLog(first, last, filter=txn_filter)
        # both storages must have been asked the same first/last range
        pfirst, plast, ppartition = asked.pop().decode()
        self.assertEqual(pfirst, first)
        self.assertEqual(plast, last)
        self.assertEqual(ppartition, INVALID_PARTITION)
        pfirst, plast, ppartition = asked.pop().decode()
        self.assertEqual(pfirst, first)
        self.assertEqual(plast, last)
        self.assertEqual(ppartition, INVALID_PARTITION)
        self.assertEqual(result[0]['id'], tid1)
        self.assertEqual(result[1]['id'], tid2)
        self.assertFalse(asked)
    def test_connectToPrimaryNode(self):
        """Drive _connectToPrimaryNode through a scripted sequence of
        bootstrap outcomes until it succeeds."""
        # here we have three master nodes :
        # the connection to the first will fail
        # the second will have changed
        # the third will not be ready
        # after the third, the partition table will be operational
        # (as if it was connected to the primary master node)
        # will raise IndexError at the third iteration
        app = self.getApp('127.0.0.1:10010 127.0.0.1:10011')
        # TODO: test more connection failure cases
        all_passed = []
        # askLastTransaction
        def _ask9(_):
            all_passed.append(1)
        # Seventh packet : askNodeInformation succeeded
        def _ask8(_):
            pass
        # Sixth packet : askPartitionTable succeeded
        def _ask7(_):
            app.pt = Mock({'operational': True})
        # fifth packet : request node identification succeeded
        def _ask6(conn):
            app.master_conn = conn
            app.uuid = 1 + (UUID_NAMESPACES[NodeTypes.CLIENT] << 24)
            app.trying_master_node = app.primary_master_node = Mock({
                'getAddress': ('127.0.0.1', 10011),
                '__str__': 'Fake master node',
            })
        # third iteration : node not ready
        def _ask4(_):
            app.trying_master_node = None
        # second iteration : master node changed
        def _ask3(_):
            app.primary_master_node = Mock({
                'getAddress': ('127.0.0.1', 10010),
                '__str__': 'Fake master node',
            })
        # first iteration : connection failed
        def _ask2(_):
            app.trying_master_node = None
        # do nothing for the first call
        # Case of an unknown primary_uuid (XXX: handler should probably raise,
        # it's not normal for a node to inform of a primary uuid without
        # telling us what its address is.)
        def _ask1(_):
            pass
        ask_func_list = [_ask1, _ask2, _ask3, _ask4, _ask6, _ask7,
            _ask8, _ask9]
        def _ask_base(conn, _, handler=None):
            # pop and run the next scripted step, then drop the connection
            ask_func_list.pop(0)(conn)
            app.nm.getByAddress(conn.getAddress())._connection = None
        app._ask = _ask_base
        # faked environment
        app.em.close()
        app.em = Mock({'getConnectionList': []})
        app.pt = Mock({ 'operational': False})
        app.start = lambda: None
        app.master_conn = app._connectToPrimaryNode()
        self.assertEqual(len(all_passed), 1)
        self.assertTrue(app.master_conn is not None)
        self.assertTrue(app.pt.operational())
    def test_askPrimary(self):
        """ _askPrimary is private but test it anyway """
        app = self.getApp()
        conn = Mock()
        app.master_conn = conn
        app.primary_handler = Mock()
        self.test_ok = False
        def _ask_hook(app, conn, packet, handler=None):
            # record that _ask was reached with the primary handler
            conn.ask(packet)
            self.assertTrue(handler is app.primary_handler)
            self.test_ok = True
        _ask_old = Application._ask
        Application._ask = _ask_hook
        packet = Packets.AskBeginTransaction()
        packet.setId(0)
        try:
            app._askPrimary(packet)
        finally:
            # always restore the class-level _ask, even on failure
            Application._ask = _ask_old
        # check packet sent, connection locked during process and dispatcher updated
        self.checkAskNewTid(conn)
        self.checkDispatcherRegisterCalled(app, conn)
        # and _ask called
        self.assertTrue(self.test_ok)
        # check NEOStorageError is raised when the primary connection is lost
        app.master_conn = None
        # check disabled since we reconnect to pmn
        #self.assertRaises(NEOStorageError, app._askPrimary, packet)
def test_threadContextIsolation(self):
""" Thread context properties must not be visible accross instances
while remaining in the same thread """
app1 = self.getApp()
app1_local = app1._thread_container
app2 = self.getApp()
app2_local = app2._thread_container
property_id = 'thread_context_test'
value = 'value'
self.assertFalse(hasattr(app1_local, property_id))
self.assertFalse(hasattr(app2_local, property_id))
setattr(app1_local, property_id, value)
self.assertEqual(getattr(app1_local, property_id), value)
self.assertFalse(hasattr(app2_local, property_id))
def test_pack(self):
app = self.getApp()
marker = []
def askPrimary(packet):
marker.append(packet)
app._askPrimary = askPrimary
# XXX: could not identify a value causing TimeStamp to return ZERO_TID
#self.assertRaises(NEOStorageError, app.pack, )
self.assertEqual(len(marker), 0)
now = time.time()
app.pack(now)
self.assertEqual(len(marker), 1)
self.assertEqual(type(marker[0]), Packets.AskPack)
# XXX: how to validate packet content ?
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/client/testConnectionPool.py 0000664 0000000 0000000 00000010642 12601037530 0030503 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import time, unittest
from mock import Mock, ReturnValues
from .. import NeoUnitTestBase
from neo.client.app import ConnectionPool
from neo.client.exception import NEOStorageError
from neo.client import pool
class ConnectionPoolTests(NeoUnitTestBase):
    """Unit tests for neo.client.pool.ConnectionPool."""

    def test_removeConnection(self):
        """removeConnection drops only the entry for the node's UUID and
        tolerates unknown nodes."""
        app = None
        # NOTE: the local name shadows the imported `pool` module here
        pool = ConnectionPool(app)
        test_node_uuid = self.getStorageUUID()
        other_node_uuid = self.getStorageUUID()
        test_node = Mock({'getUUID': test_node_uuid})
        other_node = Mock({'getUUID': other_node_uuid})
        # Test sanity check
        self.assertEqual(getattr(pool, 'connection_dict', None), {})
        # Call must not raise if node is not known
        self.assertEqual(len(pool.connection_dict), 0)
        pool.removeConnection(test_node)
        # Test that removal with another uuid doesn't affect entry
        pool.connection_dict[test_node_uuid] = None
        self.assertEqual(len(pool.connection_dict), 1)
        pool.removeConnection(other_node)
        self.assertEqual(len(pool.connection_dict), 1)
        # Test that removeConnection works
        pool.removeConnection(test_node)
        self.assertEqual(len(pool.connection_dict), 0)

    # TODO: test getConnForNode (requires splitting complex functionalities)

    def test_CellSortKey(self):
        """Cells sort connected-first, then by how recently they failed."""
        cp = ConnectionPool(None)
        node_uuid_1 = self.getStorageUUID()
        node_uuid_2 = self.getStorageUUID()
        node_uuid_3 = self.getStorageUUID()
        # We are connected to node 1
        cp.connection_dict[node_uuid_1] = None
        def uuid_now(func, uuid, now):
            # call *func* with a mock node while pool.time reports *now*
            pool.time = Mock({'time': now})
            try:
                return func(Mock({'getUUID': uuid}))
            finally:
                pool.time = time
        # A connection to node 3 failed, will be forgotten at 5
        uuid_now(cp.notifyFailure, node_uuid_3, 5 - pool.MAX_FAILURE_AGE)
        def getCellSortKey(*args):
            return uuid_now(cp.getCellSortKey, *args)
        # At 0, key values are not ambiguous
        self.assertTrue(getCellSortKey(node_uuid_1, 0) < getCellSortKey(
            node_uuid_2, 0) < getCellSortKey(node_uuid_3, 0))
        # At 10, nodes 2 and 3 have the same key value
        self.assertTrue(getCellSortKey(node_uuid_1, 10) < getCellSortKey(
            node_uuid_2, 10))
        self.assertEqual(getCellSortKey(node_uuid_2, 10), getCellSortKey(
            node_uuid_3, 10))

    def test_iterateForObject_noStorageAvailable(self):
        """With no cell for the oid, iterating raises NEOStorageError."""
        # no node available
        oid = self.getOID(1)
        app = Mock()
        app.pt = Mock({'getCellList': []})
        pool = ConnectionPool(app)
        self.assertRaises(NEOStorageError, pool.iterateForObject(oid).next)

    def test_iterateForObject_connectionRefused(self):
        """A first refused connection is retried before yielding."""
        # connection refused at the first try
        oid = self.getOID(1)
        node = Mock({'__repr__': 'node', 'isRunning': True})
        cell = Mock({'__repr__': 'cell', 'getNode': node})
        conn = Mock({'__repr__': 'conn'})
        app = Mock()
        app.pt = Mock({'getCellList': [cell]})
        pool = ConnectionPool(app)
        pool.getConnForNode = Mock({'__call__': ReturnValues(None, conn)})
        self.assertEqual(list(pool.iterateForObject(oid)), [(node, conn)])

    def test_iterateForObject_connectionAccepted(self):
        """An accepted connection is yielded with its node."""
        # connection accepted
        oid = self.getOID(1)
        node = Mock({'__repr__': 'node', 'isRunning': True})
        cell = Mock({'__repr__': 'cell', 'getNode': node})
        conn = Mock({'__repr__': 'conn'})
        app = Mock()
        app.pt = Mock({'getCellList': [cell]})
        pool = ConnectionPool(app)
        pool.getConnForNode = Mock({'__call__': conn})
        self.assertEqual(list(pool.iterateForObject(oid)), [(node, conn)])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/client/testMasterHandler.py 0000664 0000000 0000000 00000015605 12601037530 0030307 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from .. import NeoUnitTestBase
from neo.lib.node import NodeManager
from neo.lib.pt import PartitionTable
from neo.lib.protocol import NodeTypes
from neo.client.handlers.master import PrimaryBootstrapHandler
from neo.client.handlers.master import PrimaryNotificationsHandler, \
PrimaryAnswersHandler
from neo.client.exception import NEOStorageError
class MasterHandlerTests(NeoUnitTestBase):
    """Shared fixtures for the master-handler test cases below."""

    def setUp(self):
        """Create a mocked app with a real NodeManager."""
        super(MasterHandlerTests, self).setUp()
        self.db = Mock()
        self.app = Mock({'getDB': self.db,
            'txn_contexts': ()})
        self.app.nm = NodeManager()
        self.app.dispatcher = Mock()
        # each fake master created by getKnownMaster gets a distinct port
        self._next_port = 3000

    def getKnownMaster(self):
        """Register a master node bound to a fake connection; return both."""
        node = self.app.nm.createMaster(address=(
            self.local_ip, self._next_port),
        )
        self._next_port += 1
        conn = self.getFakeConnection(address=node.getAddress())
        node.setConnection(conn)
        return node, conn
class MasterBootstrapHandlerTests(MasterHandlerTests):
    """Tests for PrimaryBootstrapHandler."""

    def setUp(self):
        super(MasterBootstrapHandlerTests, self).setUp()
        self.handler = PrimaryBootstrapHandler(self.app)

    def checkCalledOnApp(self, method, index=0):
        """Assert *method* was called on the mocked app; return its params."""
        calls = self.app.mockGetNamedCalls(method)
        self.assertTrue(len(calls) > index)
        return calls[index].params

    def test_notReady(self):
        """notReady clears the master currently being tried."""
        conn = self.getFakeConnection()
        self.handler.notReady(conn, 'message')
        self.assertEqual(self.app.trying_master_node, None)

    def test_acceptIdentification1(self):
        """ Non-master node """
        node, conn = self.getKnownMaster()
        self.handler.acceptIdentification(conn, NodeTypes.CLIENT,
            node.getUUID(), 100, 0, None, None, [])
        self.checkClosed(conn)

    def test_acceptIdentification2(self):
        """ No UUID supplied """
        node, conn = self.getKnownMaster()
        uuid = self.getMasterUUID()
        addr = conn.getAddress()
        self.checkProtocolErrorRaised(self.handler.acceptIdentification,
            conn, NodeTypes.MASTER, uuid, 100, 0, None,
            addr, [(addr, uuid)],
        )

    def test_acceptIdentification3(self):
        """ identification accepted """
        node, conn = self.getKnownMaster()
        uuid = self.getMasterUUID()
        addr = conn.getAddress()
        your_uuid = self.getClientUUID()
        self.handler.acceptIdentification(conn, NodeTypes.MASTER, uuid,
            100, 2, your_uuid, addr, [(addr, uuid)])
        self.assertEqual(self.app.uuid, your_uuid)
        self.assertEqual(node.getUUID(), uuid)
        self.assertTrue(isinstance(self.app.pt, PartitionTable))

    def _getMasterList(self, uuid_list):
        """Build a (address, uuid) list with sequential local ports."""
        port = 1000
        master_list = []
        for uuid in uuid_list:
            master_list.append((('127.0.0.1', port), uuid))
            port += 1
        return master_list

    def test_answerPartitionTable(self):
        """answerPartitionTable loads the received rows into app.pt."""
        conn = self.getFakeConnection()
        self.app.pt = Mock()
        ptid = 0
        row_list = ([], [])
        self.handler.answerPartitionTable(conn, ptid, row_list)
        load_calls = self.app.pt.mockGetNamedCalls('load')
        self.assertEqual(len(load_calls), 1)
        # load_calls[0].checkArgs(ptid, row_list, self.app.nm)
class MasterNotificationsHandlerTests(MasterHandlerTests):
    """Tests for PrimaryNotificationsHandler."""

    def setUp(self):
        super(MasterNotificationsHandlerTests, self).setUp()
        self.handler = PrimaryNotificationsHandler(self.app)

    def test_connectionClosed(self):
        """Losing the master connection resets both connection and node."""
        conn = self.getFakeConnection()
        node = Mock()
        self.app.master_conn = conn
        self.app.primary_master_node = node
        self.handler.connectionClosed(conn)
        self.assertEqual(self.app.master_conn, None)
        self.assertEqual(self.app.primary_master_node, None)

    def test_invalidateObjects(self):
        """Invalidation hits the client cache and the registered DB."""
        conn = self.getFakeConnection()
        tid = self.getNextTID()
        oid1, oid2, oid3 = self.getOID(1), self.getOID(2), self.getOID(3)
        self.app._cache = Mock({
            'invalidate': None,
        })
        self.handler.invalidateObjects(conn, tid, [oid1, oid3])
        cache_calls = self.app._cache.mockGetNamedCalls('invalidate')
        self.assertEqual(len(cache_calls), 2)
        cache_calls[0].checkArgs(oid1, tid)
        cache_calls[1].checkArgs(oid3, tid)
        invalidation_calls = self.db.mockGetNamedCalls('invalidate')
        self.assertEqual(len(invalidation_calls), 1)
        invalidation_calls[0].checkArgs(tid, [oid1, oid3])

    def test_notifyPartitionChanges(self):
        """Partition changes are applied to app.pt via update()."""
        conn = self.getFakeConnection()
        self.app.pt = Mock({'filled': True})
        ptid = 0
        cell_list = (Mock(), Mock())
        self.handler.notifyPartitionChanges(conn, ptid, cell_list)
        update_calls = self.app.pt.mockGetNamedCalls('update')
        self.assertEqual(len(update_calls), 1)
        update_calls[0].checkArgs(ptid, cell_list, self.app.nm)
class MasterAnswersHandlerTests(MasterHandlerTests):
    """Tests for PrimaryAnswersHandler (replies to client requests)."""

    def setUp(self):
        super(MasterAnswersHandlerTests, self).setUp()
        self.handler = PrimaryAnswersHandler(self.app)

    def _checkSingleHandlerData(self, expected):
        # setHandlerData must have been called exactly once with `expected`.
        calls = self.app.mockGetNamedCalls('setHandlerData')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(expected)

    def test_answerBeginTransaction(self):
        """The granted ttid is stored as handler data."""
        ttid = self.getNextTID()
        fake_conn = self.getFakeConnection()
        self.handler.answerBeginTransaction(fake_conn, ttid)
        self._checkSingleHandlerData(ttid)

    def test_answerNewOIDs(self):
        """Received OIDs are stored reversed (so pop() yields in order)."""
        fake_conn = self.getFakeConnection()
        oid1, oid2, oid3 = self.getOID(0), self.getOID(1), self.getOID(2)
        self.handler.answerNewOIDs(fake_conn, [oid1, oid2, oid3])
        self.assertEqual(self.app.new_oid_list, [oid3, oid2, oid1])

    def test_answerTransactionFinished(self):
        """The final tid (not the ttid) is stored as handler data."""
        fake_conn = self.getFakeConnection()
        ttid = self.getNextTID()
        tid = self.getNextTID()
        self.handler.answerTransactionFinished(fake_conn, ttid, tid)
        self._checkSingleHandlerData(tid)

    def test_answerPack(self):
        """answerPack raises NEOStorageError unless the pack succeeded."""
        self.assertRaises(NEOStorageError, self.handler.answerPack, None, False)
        # Check it doesn't raise
        self.handler.answerPack(None, True)
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/client/testStorageHandler.py 0000664 0000000 0000000 00000020341 12601037530 0030451 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from .. import NeoUnitTestBase
from neo.client.handlers.storage import StorageAnswersHandler
from neo.client.exception import NEOStorageError, NEOStorageNotFoundError
from neo.client.exception import NEOStorageDoesNotExistError
from ZODB.TimeStamp import TimeStamp
class StorageAnswerHandlerTests(NeoUnitTestBase):
    """Unit tests for StorageAnswersHandler, driven through Mock objects."""

    def setUp(self):
        super(StorageAnswerHandlerTests, self).setUp()
        self.app = Mock()
        self.handler = StorageAnswersHandler(self.app)

    def _checkHandlerData(self, ref):
        # Assert that app.setHandlerData was called exactly once with `ref`.
        calls = self.app.mockGetNamedCalls('setHandlerData')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(ref)

    def test_answerObject(self):
        """answerObject stores everything but the oid as handler data."""
        conn = self.getFakeConnection()
        oid = self.getOID(0)
        tid1 = self.getNextTID()
        tid2 = self.getNextTID(tid1)
        the_object = (oid, tid1, tid2, 0, '', 'DATA', None)
        self.handler.answerObject(conn, *the_object)
        self._checkHandlerData(the_object[1:])

    def _getAnswerStoreObjectHandler(self, object_stored_counter_dict,
            conflict_serial_dict, resolved_conflict_serial_dict):
        # Build a handler whose app exposes the three store-tracking dicts
        # through getHandlerData(), mimicking the real client application.
        app = Mock({
            'getHandlerData': {
                'object_stored_counter_dict': object_stored_counter_dict,
                'conflict_serial_dict': conflict_serial_dict,
                'resolved_conflict_serial_dict': resolved_conflict_serial_dict,
            }
        })
        return StorageAnswersHandler(app)

    def test_answerStoreObject_1(self):
        """New conflict: serial recorded, store counter left untouched."""
        conn = self.getFakeConnection()
        oid = self.getOID(0)
        tid = self.getNextTID()
        # conflict
        object_stored_counter_dict = {oid: {}}
        conflict_serial_dict = {}
        resolved_conflict_serial_dict = {}
        self._getAnswerStoreObjectHandler(object_stored_counter_dict,
                conflict_serial_dict, resolved_conflict_serial_dict,
            ).answerStoreObject(conn, 1, oid, tid)
        self.assertEqual(conflict_serial_dict[oid], {tid})
        self.assertEqual(object_stored_counter_dict[oid], {})
        self.assertFalse(oid in resolved_conflict_serial_dict)
        # object was already accepted by another storage, raise
        handler = self._getAnswerStoreObjectHandler({oid: {tid: {1}}}, {}, {})
        self.assertRaises(NEOStorageError, handler.answerStoreObject,
            conn, 1, oid, tid)

    def test_answerStoreObject_2(self):
        """Conflict reported again while resolution is still pending."""
        conn = self.getFakeConnection()
        oid = self.getOID(0)
        tid = self.getNextTID()
        tid_2 = self.getNextTID()
        # resolution-pending conflict
        object_stored_counter_dict = {oid: {}}
        conflict_serial_dict = {oid: {tid}}
        resolved_conflict_serial_dict = {}
        self._getAnswerStoreObjectHandler(object_stored_counter_dict,
                conflict_serial_dict, resolved_conflict_serial_dict,
            ).answerStoreObject(conn, 1, oid, tid)
        self.assertEqual(conflict_serial_dict[oid], {tid})
        self.assertFalse(oid in resolved_conflict_serial_dict)
        self.assertEqual(object_stored_counter_dict[oid], {})
        # object was already accepted by another storage, raise
        handler = self._getAnswerStoreObjectHandler({oid: {tid: {1}}},
            {oid: {tid}}, {})
        self.assertRaises(NEOStorageError, handler.answerStoreObject,
            conn, 1, oid, tid)
        # detected conflict is different, don't raise
        self._getAnswerStoreObjectHandler({oid: {}}, {oid: {tid}}, {},
            ).answerStoreObject(conn, 1, oid, tid_2)

    def test_answerStoreObject_3(self):
        """Conflict that was already resolved on another storage."""
        conn = self.getFakeConnection()
        oid = self.getOID(0)
        tid = self.getNextTID()
        tid_2 = self.getNextTID()
        # already-resolved conflict
        # This case happens if a storage is answering a store action for which
        # any other storage already answered (with same conflict) and any other
        # storage accepted the resolved object.
        object_stored_counter_dict = {oid: {tid_2: 1}}
        conflict_serial_dict = {}
        resolved_conflict_serial_dict = {oid: {tid}}
        self._getAnswerStoreObjectHandler(object_stored_counter_dict,
                conflict_serial_dict, resolved_conflict_serial_dict,
            ).answerStoreObject(conn, 1, oid, tid)
        self.assertFalse(oid in conflict_serial_dict)
        self.assertEqual(resolved_conflict_serial_dict[oid], {tid})
        self.assertEqual(object_stored_counter_dict[oid], {tid_2: 1})
        # detected conflict is different, don't raise
        self._getAnswerStoreObjectHandler({oid: {tid: 1}}, {},
            {oid: {tid}}).answerStoreObject(conn, 1, oid, tid_2)

    def test_answerStoreObject_4(self):
        """No conflict: the storage's uuid is counted for this serial."""
        uuid = self.getStorageUUID()
        conn = self.getFakeConnection(uuid=uuid)
        oid = self.getOID(0)
        tid = self.getNextTID()
        # no conflict
        object_stored_counter_dict = {oid: {}}
        conflict_serial_dict = {}
        resolved_conflict_serial_dict = {}
        h = self._getAnswerStoreObjectHandler(object_stored_counter_dict,
            conflict_serial_dict, resolved_conflict_serial_dict)
        h.app.getHandlerData()['cache_dict'] = {oid: None}
        h.answerStoreObject(conn, 0, oid, tid)
        self.assertFalse(oid in conflict_serial_dict)
        self.assertFalse(oid in resolved_conflict_serial_dict)
        self.assertEqual(object_stored_counter_dict[oid], {tid: {uuid}})

    def test_answerTransactionInformation(self):
        """Transaction metadata is repacked into a ZODB-style dict."""
        conn = self.getFakeConnection()
        tid = self.getNextTID()
        user = 'USER'
        desc = 'DESC'
        ext = 'EXT'
        packed = False
        oid_list = [self.getOID(0), self.getOID(1)]
        self.handler.answerTransactionInformation(conn, tid, user, desc, ext,
            packed, oid_list)
        self._checkHandlerData(({
            'time': TimeStamp(tid).timeTime(),
            'user_name': user,
            'description': desc,
            'id': tid,
            'oids': oid_list,
            'packed': packed,
        }, ext))

    def test_oidNotFound(self):
        # oidNotFound must surface as NEOStorageNotFoundError.
        conn = self.getFakeConnection()
        self.assertRaises(NEOStorageNotFoundError, self.handler.oidNotFound,
            conn, 'message')

    def test_oidDoesNotExist(self):
        # oidDoesNotExist must surface as NEOStorageDoesNotExistError.
        conn = self.getFakeConnection()
        self.assertRaises(NEOStorageDoesNotExistError,
            self.handler.oidDoesNotExist, conn, 'message')

    def test_tidNotFound(self):
        # tidNotFound must surface as NEOStorageNotFoundError.
        conn = self.getFakeConnection()
        self.assertRaises(NEOStorageNotFoundError, self.handler.tidNotFound,
            conn, 'message')

    def test_answerTIDs(self):
        """Received TIDs are accumulated into the caller-provided set."""
        uuid = self.getStorageUUID()
        tid1 = self.getNextTID()
        tid2 = self.getNextTID(tid1)
        tid_list = [tid1, tid2]
        conn = self.getFakeConnection(uuid=uuid)
        tid_set = set()
        StorageAnswersHandler(Mock()).answerTIDs(conn, tid_list, tid_set)
        self.assertEqual(tid_set, set(tid_list))

    def test_answerObjectUndoSerial(self):
        """Successive answers merge into the caller-provided dict."""
        uuid = self.getStorageUUID()
        conn = self.getFakeConnection(uuid=uuid)
        oid1 = self.getOID(1)
        oid2 = self.getOID(2)
        tid0 = self.getNextTID()
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        tid3 = self.getNextTID()
        undo_dict = {}
        handler = StorageAnswersHandler(Mock())
        handler.answerObjectUndoSerial(conn, {oid1: [tid0, tid1]}, undo_dict)
        self.assertEqual(undo_dict, {oid1: [tid0, tid1]})
        handler.answerObjectUndoSerial(conn, {oid2: [tid2, tid3]}, undo_dict)
        self.assertEqual(undo_dict, {
            oid1: [tid0, tid1],
            oid2: [tid2, tid3],
        })
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/cluster.py 0000664 0000000 0000000 00000020152 12601037530 0025052 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2011-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import __builtin__
import errno
import mmap
import os
import psutil
import signal
import sys
import tempfile
from cPickle import dumps, loads
from functools import wraps
from time import time, sleep
from neo.lib import debug
class ClusterDict(dict):
    """Simple storage (dict), shared with forked processes

    The dict content is serialized (cPickle) into an anonymous mmap'ed
    temporary file, and exclusive access is implemented with a pipe
    holding a single byte token:
    - acquire() consumes the token (blocking until available) and
      reloads the dict from the shared mapping;
    - release() puts the token back, optionally committing first.
    The lock is reentrant: _acquired is -1 when released, 0 while the
    token is held by this process, and >0 for nested acquisitions.
    """
    # Reentrancy counter; the initial release() in __init__ brings it to -1.
    _acquired = 0
    def __init__(self, *args, **kw):
        dict.__init__(self, *args, **kw)
        # The pipe carries the lock token, inherited across fork().
        self._r, self._w = os.pipe()
        # shm_open(3) would be better but Python doesn't provide it.
        # See also http://nikitathespider.com/python/shm/
        with tempfile.TemporaryFile() as f:
            f.write(dumps(self.copy(), -1))
            f.flush()
            self._shared = mmap.mmap(f.fileno(), f.tell())
        # Write the initial token into the pipe (lock starts released).
        self.release()
    def __del__(self):
        try:
            os.close(self._r)
            os.close(self._w)
        except TypeError: # if os.close is None
            pass
    def acquire(self):
        self._acquired += 1
        # Outermost acquisition only (counter just went from -1 to 0):
        # take the token and refresh our copy from the shared mapping.
        if not self._acquired:
            os.read(self._r, 1)
            try:
                self.clear()
                shared = self._shared
                # Remap in case another process resized the backing file.
                shared.resize(shared.size())
                self.update(loads(shared[:]))
            except:
                self.release()
                raise
    def release(self, commit=False):
        # Outermost release only (counter still 0): optionally publish our
        # changes, then return the token so other processes may acquire.
        if not self._acquired:
            if commit:
                self.commit()
            os.write(self._w, '\0')
        self._acquired -= 1
    def commit(self):
        """Serialize the current content into the shared mapping."""
        shared = self._shared
        p = dumps(self.copy(), -1)
        shared.resize(len(p))
        shared[:] = p
# Module-level shared dict; forked test node processes inherit it.
cluster_dict = ClusterDict()
class ClusterPdb(object):
    """Multiprocess-aware wrapper around console and winpdb debuggers

    __call__ is the method to break.

    Processes coordinate through cluster_dict['last_pdb'] (pid -> None
    while actively broken, or a timestamp of the last debug activity),
    so that e.g. kill() does not silently destroy a process someone is
    currently debugging.

    TODO: monkey-patch normal code not to timeout
          if another node is being debugged
    """
    def __init__(self):
        # (code id, lineno) -> number of times __call__ hit that site.
        self._count_dict = {}
    def __setattr__(self, name, value):
        # Assigning a bound method to an attribute that already exists on
        # this class installs a hook: the target object's method is replaced
        # by a wrapper that routes through our method (used below for
        # _bdb_interaction and _rpdb2_set_state).
        try:
            hook = getattr(self, name)
            setattr(value.im_self, value.__name__, wraps(value)(
                lambda *args, **kw: hook(value, *args, **kw)))
        except AttributeError:
            object.__setattr__(self, name, value)
    @property
    def broken_peer(self):
        # True when another process is currently stopped at a breakpoint.
        return self._getLastPdb(os.getpid()) is None
    def __call__(self, max_count=None, depth=0, text=None):
        """Break into a debugger at the caller's frame.

        max_count limits how many times a given call site breaks;
        text selects the console debugger over winpdb.
        """
        depth += 1
        if max_count:
            frame = sys._getframe(depth)
            key = id(frame.f_code), frame.f_lineno
            del frame
            self._count_dict[key] = count = 1 + self._count_dict.get(key, 0)
            if max_count < count:
                return
        if not text:
            try:
                import rpdb2
            except ImportError:
                # text is '' (falsy but not None): the caller explicitly
                # wanted winpdb, so propagate the failure.
                if text is not None:
                    raise
            else:
                if rpdb2.g_debugger is None:
                    # Intercept the first CStateManager instantiation to
                    # grab its set_state method (hooked via __setattr__),
                    # then restore the original class.
                    rpdb2_CStateManager = rpdb2.CStateManager
                    def CStateManager(*args, **kw):
                        rpdb2.CStateManager = rpdb2_CStateManager
                        state_manager = rpdb2.CStateManager(*args, **kw)
                        self._rpdb2_set_state = state_manager.set_state
                        return state_manager
                    rpdb2.CStateManager = CStateManager
                return debug.winpdb(depth)
        # Console debugger: create it lazily and hook its interaction
        # method (again via __setattr__) to serialize console access.
        try:
            debugger = self.__dict__['_debugger']
        except KeyError:
            assert 'rpdb2' not in sys.modules
            self._debugger = debugger = debug.getPdb()
            self._bdb_interaction = debugger.interaction
        return debugger.set_trace(sys._getframe(depth))
    def kill(self, pid, sig):
        """Send `sig` to `pid`, but wait (until ^C) if it is being debugged."""
        force = []
        sigint_handler = None
        try:
            while 1:
                cluster_dict.acquire()
                try:
                    last_pdb = cluster_dict.get('last_pdb', {})
                    # Kill immediately if the user forced it (^C) or the
                    # target was never under a debugger.
                    if force or pid not in last_pdb:
                        os.kill(pid, sig)
                        last_pdb.pop(pid, None)
                        cluster_dict.commit()
                        break
                    try:
                        if psutil.Process(pid).status() == psutil.STATUS_ZOMBIE:
                            break
                    except psutil.NoSuchProcess:
                        raise OSError(errno.ESRCH, 'No such process')
                finally:
                    cluster_dict.release()
                # Target is (or was) debugged: ask the user before killing.
                if sigint_handler is None:
                    sigint_handler = signal.signal(signal.SIGINT,
                        lambda *args: force.append(None))
                sys.stderr.write('Pid %u is/was debugged.'
                                 ' Press ^C to kill it...' % pid)
                sleep(1)
        finally:
            if sigint_handler is not None:
                signal.signal(signal.SIGINT, sigint_handler)
        if force:
            sys.stderr.write('\n')
    def _lock_console(self):
        # Spin until we own the single console slot ('text_pdb' key).
        while 1:
            cluster_dict.acquire()
            try:
                if 'text_pdb' not in cluster_dict:
                    cluster_dict['text_pdb'] = pid = os.getpid()
                    # None marks this pid as actively broken.
                    cluster_dict.setdefault('last_pdb', {})[pid] = None
                    cluster_dict.commit()
                    break
            finally:
                cluster_dict.release()
            sleep(0.5)
    def _unlock_console(self):
        # Release the console slot and record when we last debugged.
        cluster_dict.acquire()
        try:
            pid = cluster_dict.pop('text_pdb')
            cluster_dict['last_pdb'][pid] = time()
            cluster_dict.commit()
        finally:
            cluster_dict.release()
    def _bdb_interaction(self, hooked, *args, **kw):
        # Wraps pdb's interaction so only one process uses the console.
        self._lock_console()
        try:
            return hooked(*args, **kw)
        finally:
            self._unlock_console()
    def _rpdb2_set_state(self, hooked, state=None, *args, **kw):
        # Wraps rpdb2 state changes to keep 'last_pdb' up to date.
        from rpdb2 import STATE_BROKEN, STATE_DETACHED
        cluster_dict.acquire()
        try:
            if state is None:
                state = hooked.im_self.get_state()
            last_pdb = cluster_dict.setdefault('last_pdb', {})
            pid = os.getpid()
            if state == STATE_DETACHED:
                last_pdb.pop(pid, None)
            else:
                # None while broken in the debugger, timestamp otherwise.
                last_pdb[pid] = state != STATE_BROKEN and time() or None
            return hooked(state=state, *args, **kw)
        finally:
            cluster_dict.release(True)
    def _getLastPdb(self, *exclude):
        # Latest debug-activity timestamp of other processes, 0 if none,
        # or None if any of them is currently broken in a debugger.
        result = 0
        for pid, last_pdb in cluster_dict.get('last_pdb', {}).iteritems():
            if pid not in exclude:
                if last_pdb is None:
                    return
                if result < last_pdb:
                    result = last_pdb
        return result
    def wait(self, test, timeout):
        """Poll `test` until true; the deadline is extended while some
        process is (or was recently) being debugged.  Returns False on
        timeout."""
        end_time = time() + timeout
        period = 0.1
        while not test():
            cluster_dict.acquire()
            try:
                last_pdb = self._getLastPdb()
                if last_pdb is None:
                    # Someone is at a breakpoint: never time out, poll slowly.
                    next_sleep = 1
                else:
                    next_sleep = max(last_pdb + timeout, end_time) - time()
                    if next_sleep > period:
                        next_sleep = period
                        period *= 1.5
                    elif next_sleep < 0:
                        return False
            finally:
                cluster_dict.release()
            sleep(next_sleep)
        return True
# Install the shared debugger as a builtin so any test code can call pdb(),
# and allow breaking into a running node with SIGUSR1.
__builtin__.pdb = ClusterPdb()
signal.signal(signal.SIGUSR1, debug.safe_handler(
    lambda sig, frame: pdb(depth=2)))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/fs2zodb.py 0000664 0000000 0000000 00000010106 12601037530 0024740 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2014-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import os, stat, time
from persistent import Persistent
from persistent.TimeStamp import TimeStamp
from BTrees.OOBTree import OOBTree
class Inode(OOBTree):
    """In-ZODB image of a filesystem tree.

    Each Inode maps child names to Inodes; the special key os.pardir
    points to the parent (the root points to itself, see __init__).
    Regular files and symlinks keep their content/target in `data`.
    """
    # File content (S_IFREG) or link target (S_IFLNK); None for directories.
    data = None

    def __init__(self, up=None, mode=stat.S_IFDIR):
        # The root inode (up=None) is its own parent.
        self[os.pardir] = self if up is None else up
        self.mode = mode
        self.mtime = time.time()

    def __getstate__(self):
        # Persist both the attribute state and the BTree content.
        return Persistent.__getstate__(self), OOBTree.__getstate__(self)

    def __setstate__(self, state):
        Persistent.__setstate__(self, state[0])
        OOBTree.__setstate__(self, state[1])

    def edit(self, data=None, mtime=None):
        """Update content and mtime, asserting consistency with the mode."""
        fmt = stat.S_IFMT(self.mode)
        if data is None:
            assert fmt == stat.S_IFDIR, oct(fmt)
        else:
            assert fmt == stat.S_IFREG or fmt == stat.S_IFLNK, oct(fmt)
        # Only assign on change, to avoid marking the object dirty.
        if self.data != data:
            self.data = data
        if self.mtime != mtime:
            self.mtime = mtime or time.time()

    def root(self):
        """Return the root of the tree this inode belongs to."""
        try:
            up = self[os.pardir]
        except KeyError:
            return self
        if up is self:
            # BUG FIX: the root is its own parent (see __init__), so the
            # previous unconditional `return self[os.pardir].root()` style
            # recursion never terminated once the root was reached.
            return self
        return up.root()

    def traverse(self, path, followlinks=True):
        """Resolve `path` (a string or an iterable of components) to an
        Inode, optionally following symlinks."""
        path = iter(path.split(os.sep) if isinstance(path, basestring) and path
                    else path)
        for d in path:
            if not d:
                # Empty component (leading os.sep): restart from the root.
                return self.root().traverse(path, followlinks)
            if d != os.curdir:
                d = self[d]
                if followlinks and stat.S_ISLNK(d.mode):
                    d = self.traverse(d.data, True)
                return d.traverse(path, followlinks)
        return self

    def inodeFromFs(self, path):
        """Mirror the filesystem entry at `path` into this directory.

        Returns a true value if the child inode was created or modified.
        """
        s = os.lstat(path)
        mode = s.st_mode
        name = os.path.basename(path)
        try:
            i = self[name]
            assert stat.S_IFMT(i.mode) == stat.S_IFMT(mode)
            changed = False
        except KeyError:
            i = self[name] = self.__class__(self, mode)
            changed = True
        # BUG FIX: the symlink branch read os.readlink(p), but `p` is not
        # defined in this scope (it is a local of treeFromFs), so importing
        # any symlink raised NameError.  The lstat'ed `path` must be read.
        i.edit(open(path).read() if stat.S_ISREG(mode) else
               os.readlink(path) if stat.S_ISLNK(mode) else
               None, s.st_mtime)
        return changed or i._p_changed

    def treeFromFs(self, path, yield_interval=None, filter=None):
        """Import the whole filesystem tree rooted at `path`.

        Yields self every `yield_interval` changed entries (so the caller
        can commit incrementally) and once at the end if anything changed.
        `filter` receives each path relative to `path`; a false result
        skips the entry (and, for directories, the whole subtree).
        """
        prefix_len = len(path) + len(os.sep)
        n = 0
        for dirpath, dirnames, filenames in os.walk(path):
            inodeFromFs = self.traverse(dirpath[prefix_len:]).inodeFromFs
            for names in dirnames, filenames:
                skipped = []
                for j, name in enumerate(names):
                    p = os.path.join(dirpath, name)
                    if filter and not filter(p[prefix_len:]):
                        skipped.append(j)
                    elif inodeFromFs(p):
                        n += 1
                        if n == yield_interval:
                            n = 0
                            yield self
                # Delete in reverse index order so earlier indices stay
                # valid; pruning dirnames stops os.walk from descending.
                while skipped:
                    del names[skipped.pop()]
        if n:
            yield self

    def walk(self):
        """os.walk-like iterator over the inode tree, yielding
        (top, dirs, nondirs) tuples."""
        s = [(None, self)]
        while s:
            top, self = s.pop()
            dirs = []
            nondirs = []
            for name, inode in self.iteritems():
                if name != os.pardir:
                    (dirs if stat.S_ISDIR(inode.mode) else nondirs).append(name)
            yield top or os.curdir, dirs, nondirs
            for name in dirs:
                s.append((os.path.join(top, name) if top else name, self[name]))
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/functional/ 0000775 0000000 0000000 00000000000 12601037530 0025161 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/functional/__init__.py 0000664 0000000 0000000 00000060073 12601037530 0027300 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import errno
import os
import sys
import time
import ZODB
import socket
import signal
import random
import MySQLdb
import sqlite3
import unittest
import tempfile
import traceback
import threading
import psutil
from ConfigParser import SafeConfigParser
import neo.scripts
from neo.neoctl.neoctl import NeoCTL, NotReadyException
from neo.lib import logging
from neo.lib.protocol import ClusterStates, NodeTypes, CellStates, NodeStates, \
UUID_NAMESPACES
from neo.lib.util import dump
from .. import cluster, DB_USER, setupMySQLdb, NeoTestBase, buildUrlFromString, \
ADDRESS_TYPE, IP_VERSION_FORMAT_DICT, getTempDirectory
from neo.client.Storage import Storage
from neo.storage.database import buildDatabaseManager
# Console script (neo.scripts entry point) used to launch each node type.
command_dict = {
    NodeTypes.MASTER: 'neomaster',
    NodeTypes.STORAGE: 'neostorage',
    NodeTypes.ADMIN: 'neoadmin',
}
# Extra seconds added on top of caller-provided timeouts in expectCondition.
DELAY_SAFETY_MARGIN = 10
# Maximum seconds to wait for the cluster to come up in run()/start().
MAX_START_TIME = 30
class NodeProcessError(Exception):
    """A NEO node subprocess exited with a non-zero status."""
    pass

class AlreadyRunning(Exception):
    """Attempt to start a NEOProcess whose child is still alive."""
    pass

class AlreadyStopped(Exception):
    """Attempt to kill/wait a NEOProcess that has no live child."""
    pass

class NotFound(Exception):
    """The requested neo.scripts entry point does not exist."""
    pass
class PortAllocator(object):
    """Reserve free TCP ports for cluster nodes.

    Each allocated port is kept bound/listening until release(), so no
    other software can grab it before the node process starts.
    """
    def __init__(self):
        # Sockets currently holding reserved ports.
        self.socket_list = []
        # Ports already tried (bound or failed), never retried.
        self.tried_port_set = set()
    def allocate(self, address_type, local_ip):
        """Return a free port on local_ip, keeping it reserved."""
        min_port = n = 16384
        max_port = min_port + n
        tried_port_set = self.tried_port_set
        while True:
            s = socket.socket(address_type, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # Find an unreserved port.
            while True:
                # Do not let the system choose the port to avoid conflicts
                # with other software. IOW, use a range different than:
                # - /proc/sys/net/ipv4/ip_local_port_range on Linux
                # - what IANA recommends (49152 to 65535)
                port = random.randrange(min_port, max_port)
                if port not in tried_port_set:
                    tried_port_set.add(port)
                    try:
                        s.bind((local_ip, port))
                        break
                    except socket.error, e:
                        if e.errno != errno.EADDRINUSE:
                            raise
                elif len(tried_port_set) >= n:
                    raise RuntimeError("No free port")
            # Reserve port.
            try:
                s.listen(1)
                self.socket_list.append(s)
                return port
            except socket.error, e:
                if e.errno != errno.EADDRINUSE:
                    raise
    def release(self):
        """Close all reserved sockets so node processes can bind the ports."""
        for s in self.socket_list:
            s.close()
        self.__init__()
    __del__ = release
class NEOProcess(object):
    """A NEO node (master/storage/admin) run as a forked child process."""
    # 0 means "not running"; also the value os.fork() returns in the child.
    pid = 0
    def __init__(self, command, uuid, arg_dict):
        # Fail early if `command` is not an existing neo.scripts module.
        try:
            __import__('neo.scripts.' + command)
        except ImportError:
            raise NotFound, '%s not found' % (command)
        self.command = command
        # Keys become '--option' command line switches.
        self.arg_dict = {'--' + k: v for k, v in arg_dict.iteritems()}
        self.with_uuid = True
        self.setUUID(uuid)
    def start(self, with_uuid=True):
        """Fork and run the node's main() in the child process."""
        # Prevent starting when already forked and wait wasn't called.
        if self.pid != 0:
            raise AlreadyRunning, 'Already running with PID %r' % (self.pid, )
        command = self.command
        args = []
        self.with_uuid = with_uuid
        for arg, param in self.arg_dict.iteritems():
            if with_uuid is False and arg == '--uuid':
                continue
            args.append(arg)
            if param is not None:
                args.append(str(param))
        self.pid = os.fork()
        if self.pid == 0:
            # Child
            try:
                # release SQLite debug log
                logging.setup()
                sys.argv = [command] + args
                getattr(neo.scripts, command).main()
                status = 0
            except SystemExit, e:
                status = e.code
                if status is None:
                    status = 0
            except KeyboardInterrupt:
                status = 1
            except:
                status = -1
                traceback.print_exc()
            finally:
                # prevent child from killing anything (cf __del__), or
                # running any other cleanup code normally done by the parent
                try:
                    os._exit(status)
                except:
                    print >>sys.stderr, status
                finally:
                    os._exit(1)
        logging.info('pid %u: %s %s',
            self.pid, command, ' '.join(map(repr, args)))
    def kill(self, sig=signal.SIGTERM):
        """Signal the child via pdb.kill, which refuses to silently kill
        a process currently stopped in a debugger."""
        if self.pid:
            logging.info('kill pid %u', self.pid)
            try:
                pdb.kill(self.pid, sig)
            except OSError:
                traceback.print_last()
        else:
            raise AlreadyStopped
    def __del__(self):
        # If we get killed, kill subprocesses aswell.
        try:
            self.kill(signal.SIGKILL)
            self.wait()
        except:
            # We can ignore all exceptions at this point, since there is no
            # garanteed way to handle them (other objects we would depend on
            # might already have been deleted).
            pass
    def wait(self, options=0):
        """Reap the child; raise NodeProcessError on non-zero exit status."""
        if self.pid == 0:
            raise AlreadyStopped
        result = os.WEXITSTATUS(os.waitpid(self.pid, options)[1])
        self.pid = 0
        if result:
            raise NodeProcessError('%r %r exited with status %r' % (
                self.command, self.arg_dict, result))
        return result
    def stop(self):
        # Terminate gracefully (SIGTERM) and reap.
        self.kill()
        self.wait()
    def getPID(self):
        return self.pid
    def getUUID(self):
        assert self.with_uuid, 'UUID disabled on this process'
        return self.uuid
    def setUUID(self, uuid):
        """
        Note: for this change to take effect, the node must be restarted.
        """
        self.uuid = uuid
        self.arg_dict['--uuid'] = str(uuid)
    def isAlive(self):
        # Zombies count as dead even though the pid still exists.
        try:
            return psutil.Process(self.pid).status() != psutil.STATUS_ZOMBIE
        except psutil.NoSuchProcess:
            return False
class NEOCluster(object):
def __init__(self, db_list, master_count=1, partitions=1, replicas=0,
             db_user=DB_USER, db_password='', name=None,
             cleanup_on_delete=False, temp_dir=None, clear_databases=True,
             adapter=os.getenv('NEO_TESTS_ADAPTER'),
             address_type=ADDRESS_TYPE, bind_ip=None, logger=True,
             importer=None):
    """Prepare (without starting) a whole NEO cluster: one admin node,
    `master_count` masters and one storage per entry of `db_list`.

    `importer`, if given, is a list of (name, zodb-options) pairs used
    to generate an Importer back-end configuration file.
    """
    if not adapter:
        adapter = 'MySQL'
    self.adapter = adapter
    self.zodb_storage_list = []
    self.cleanup_on_delete = cleanup_on_delete
    self.uuid_dict = {}
    self.db_list = db_list
    if temp_dir is None:
        temp_dir = tempfile.mkdtemp(prefix='neo_')
        print 'Using temp directory ' + temp_dir
    if adapter == 'MySQL':
        self.db_user = db_user
        self.db_password = db_password
        # db_template(db) -> 'user:password@db'
        self.db_template = ('%s:%s@%%s' % (db_user, db_password)).__mod__
    elif adapter == 'SQLite':
        # db_template(db) -> in-memory, absolute path, or temp_dir path.
        self.db_template = (lambda t: lambda db:
            ':memory:' if db is None else db if os.sep in db else t % db
            )(os.path.join(temp_dir, '%s.sqlite'))
    else:
        assert False, adapter
    self.address_type = address_type
    self.local_ip = local_ip = bind_ip or \
        IP_VERSION_FORMAT_DICT[self.address_type]
    self.setupDB(clear_databases)
    if importer:
        cfg = SafeConfigParser()
        cfg.add_section("neo")
        cfg.set("neo", "adapter", adapter)
        cfg.set("neo", "database", self.db_template(*db_list))
        # NOTE(review): this loop rebinds the `name` parameter (the
        # cluster name argument) — presumably harmless because
        # cluster_name is only computed below, but worth confirming.
        for name, zodb in importer:
            cfg.add_section(name)
            for x in zodb.iteritems():
                cfg.set(name, *x)
        importer_conf = os.path.join(temp_dir, 'importer.cfg')
        with open(importer_conf, 'w') as f:
            cfg.write(f)
        # The storage node then runs with the Importer adapter, using
        # the generated configuration file as its "database".
        adapter = "Importer"
        self.db_template = str
        db_list = importer_conf,
    self.process_dict = {}
    self.temp_dir = temp_dir
    self.port_allocator = PortAllocator()
    admin_port = self.port_allocator.allocate(address_type, local_ip)
    self.cluster_name = name or 'neo_%s' % random.randint(0, 100)
    master_node_list = [self.port_allocator.allocate(address_type, local_ip)
                        for i in xrange(master_count)]
    # Space-separated 'host:port' list passed to every node via --masters.
    self.master_nodes = ' '.join('%s:%s' % (
            buildUrlFromString(self.local_ip), x, )
        for x in master_node_list)
    # create admin node
    self._newProcess(NodeTypes.ADMIN, logger and 'admin', admin_port)
    # create master nodes
    for i, port in enumerate(master_node_list):
        self._newProcess(NodeTypes.MASTER, logger and 'master_%u' % i,
            port, partitions=partitions, replicas=replicas)
    # create storage nodes
    for i, db in enumerate(db_list):
        self._newProcess(NodeTypes.STORAGE, logger and 'storage_%u' % i,
            0, adapter=adapter, database=self.db_template(db))
    # create neoctl
    self.neoctl = NeoCTL((self.local_ip, admin_port))
def _newProcess(self, node_type, logfile=None, port=None, **kw):
    """Register a NEOProcess for one node of `node_type`.

    The UUID is derived from a per-type counter combined with the
    type's UUID namespace, so it is unique and deterministic.
    """
    self.uuid_dict[node_type] = uuid = 1 + self.uuid_dict.get(node_type, 0)
    uuid += UUID_NAMESPACES[node_type] << 24
    kw['uuid'] = uuid
    kw['cluster'] = self.cluster_name
    kw['masters'] = self.master_nodes
    if logfile:
        kw['logfile'] = os.path.join(self.temp_dir, logfile + '.log')
    if port is not None:
        kw['bind'] = '%s:%u' % (buildUrlFromString(self.local_ip), port)
    self.process_dict.setdefault(node_type, []).append(
        NEOProcess(command_dict[node_type], uuid, kw))

def setupDB(self, clear_databases=True):
    """Prepare the storage back-ends: create/clear the MySQL databases,
    or delete the SQLite files."""
    if self.adapter == 'MySQL':
        setupMySQLdb(self.db_list, self.db_user, self.db_password,
            clear_databases)
    elif self.adapter == 'SQLite':
        if clear_databases:
            for db in self.db_list:
                if db is None:
                    # ':memory:' database, nothing to delete.
                    continue
                db = self.db_template(db)
                try:
                    os.remove(db)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                else:
                    logging.debug('%r deleted', db)
def run(self, except_storages=()):
    """ Start cluster processes except some storage nodes """
    assert len(self.process_dict)
    # Free the reserved ports just before the nodes bind them.
    self.port_allocator.release()
    for process_list in self.process_dict.itervalues():
        for process in process_list:
            if process not in except_storages:
                process.start()
    # wait for the admin node availability
    def test():
        try:
            self.neoctl.getClusterState()
        except NotReadyException:
            return False
        return True
    if not pdb.wait(test, MAX_START_TIME):
        raise AssertionError('Timeout when starting cluster')

def start(self, except_storages=()):
    """ Do a complete start of a cluster """
    self.run(except_storages=except_storages)
    neoctl = self.neoctl
    # Number of storages expected in PENDING state before we may start
    # the cluster explicitly; None once automatic startup is detected.
    target = [len(self.db_list) - len(except_storages)]
    def test():
        try:
            state = neoctl.getClusterState()
            if state == ClusterStates.RUNNING:
                return True
            if state == ClusterStates.RECOVERING and target[0]:
                pending_count = 0
                for x in neoctl.getNodeList(node_type=NodeTypes.STORAGE):
                    if x[3] != NodeStates.PENDING:
                        target[0] = None # cluster must start automatically
                        break
                    pending_count += 1
                if pending_count == target[0]:
                    neoctl.startCluster()
        except (NotReadyException, RuntimeError):
            pass
    if not pdb.wait(test, MAX_START_TIME):
        raise AssertionError('Timeout when starting cluster')
def stop(self, clients=True):
    """Kill all node processes (and optionally close client storages)."""
    # Suspend all processes to kill before actually killing them, so that
    # nodes don't log errors because they get disconnected from other nodes:
    # otherwise, storage nodes would often flush MB of logs just because we
    # killed the master first, and waste much file system space.
    stopped_list = []
    for process_list in self.process_dict.itervalues():
        for process in process_list:
            try:
                process.kill(signal.SIGSTOP)
                stopped_list.append(process)
            except AlreadyStopped:
                pass
    error_list = []
    for process in stopped_list:
        try:
            process.kill(signal.SIGKILL)
            process.wait()
        except NodeProcessError, e:
            # Collect errors; they are re-raised together at the end.
            error_list += e.args
    if clients:
        for zodb_storage in self.zodb_storage_list:
            zodb_storage.close()
        self.zodb_storage_list = []
    time.sleep(0.5)
    if error_list:
        raise NodeProcessError('\n'.join(error_list))

def waitAll(self):
    """Reap every node process, ignoring already-stopped or failed ones."""
    for process_list in self.process_dict.itervalues():
        for process in process_list:
            try:
                process.wait()
            except (AlreadyStopped, NodeProcessError):
                pass
def getZODBStorage(self, **kw):
    """Open a NEO client Storage connected to this cluster's masters."""
    master_nodes = self.master_nodes.replace('/', ' ')
    result = Storage(
        master_nodes=master_nodes,
        name=self.cluster_name,
        **kw)
    # Remembered so that stop() can close it.
    self.zodb_storage_list.append(result)
    return result

def getZODBConnection(self, **kw):
    """ Return a tuple with the database and a connection """
    db = ZODB.DB(storage=self.getZODBStorage(**kw))
    return (db, db.open())

def getSQLConnection(self, db):
    """Direct access to the back-end database of one storage node."""
    assert db is not None and db in self.db_list
    return buildDatabaseManager(self.adapter, (self.db_template(db),))

def getMasterProcessList(self):
    # May be None if no node of that type was created.
    return self.process_dict.get(NodeTypes.MASTER)

def getStorageProcessList(self):
    return self.process_dict.get(NodeTypes.STORAGE)

def getAdminProcessList(self):
    return self.process_dict.get(NodeTypes.ADMIN)
def _killMaster(self, primary=False, all=False):
    """Kill the primary master (primary=True) or secondary masters.

    Stops after the first kill unless all=True.  Returns the list of
    killed UUIDs.
    """
    killed_uuid_list = []
    primary_uuid = self.neoctl.getPrimary()
    for master in self.getMasterProcessList():
        master_uuid = master.getUUID()
        is_primary = master_uuid == primary_uuid
        # Select primaries when primary=True, secondaries otherwise.
        if primary and is_primary or not (primary or is_primary):
            killed_uuid_list.append(master_uuid)
            master.kill()
            master.wait()
            if not all:
                break
    return killed_uuid_list

def killPrimary(self):
    return self._killMaster(primary=True)

def killSecondaryMaster(self, all=False):
    return self._killMaster(primary=False, all=all)

def killMasters(self):
    # Secondaries are killed before the primary (presumably to avoid a
    # re-election in between — confirm before relying on this order).
    secondary_list = self.killSecondaryMaster(all=True)
    primary_list = self.killPrimary()
    return secondary_list + primary_list

def killStorage(self, all=False):
    """Kill one storage node (or all of them); return killed UUIDs."""
    killed_uuid_list = []
    for storage in self.getStorageProcessList():
        killed_uuid_list.append(storage.getUUID())
        storage.kill()
        storage.wait()
        if not all:
            break
    return killed_uuid_list
def __getNodeList(self, node_type, state=None):
    # Node tuples come from neoctl; index 3 is the node state.
    return [x for x in self.neoctl.getNodeList(node_type)
            if state is None or x[3] == state]

def getMasterList(self, state=None):
    return self.__getNodeList(NodeTypes.MASTER, state)

def getStorageList(self, state=None):
    return self.__getNodeList(NodeTypes.STORAGE, state)

def getClientlist(self, state=None):
    return self.__getNodeList(NodeTypes.CLIENT, state)

def __getNodeState(self, node_type, uuid):
    # Return the state of the node with `uuid`, or None if not listed.
    node_list = self.__getNodeList(node_type)
    for node_type, address, node_uuid, state in node_list:
        if node_uuid == uuid:
            break
    else:
        state = None
    return state

def getMasterNodeState(self, uuid):
    return self.__getNodeState(NodeTypes.MASTER, uuid)

def getPrimary(self):
    """Return the primary master's UUID, or None if none is elected yet."""
    try:
        current_try = self.neoctl.getPrimary()
    except NotReadyException:
        current_try = None
    return current_try
def expectCondition(self, condition, timeout=0, on_fail=None):
end = time.time() + timeout + DELAY_SAFETY_MARGIN
opaque_history = [None]
def test():
reached, opaque = condition(opaque_history[-1])
if not reached:
opaque_history.append(opaque)
return reached
if not pdb.wait(test, timeout + DELAY_SAFETY_MARGIN):
del opaque_history[0]
if on_fail is not None:
on_fail(opaque_history)
raise AssertionError('Timeout while expecting condition. '
'History: %s' % opaque_history)
def expectAllMasters(self, node_count, state=None, *args, **kw):
    """Wait until exactly `node_count` masters (optionally in `state`)
    are known, failing fast if the count ever regresses.

    Fix: replace the Python-2-only statement form ``raise E, msg`` with
    the call form ``raise E(msg)``, which is valid in both Python 2
    and Python 3.
    """
    def callback(last_try):
        try:
            current_try = len(self.getMasterList(state=state))
        except NotReadyException:
            current_try = 0
        if last_try is not None and current_try < last_try:
            # a previously seen master disappeared: abort immediately
            raise AssertionError('Regression: %s became %s'
                                 % (last_try, current_try))
        return (current_try == node_count, current_try)
    self.expectCondition(callback, *args, **kw)
def __expectNodeState(self, node_type, uuid, state, *args, **kw):
    """Wait until the node's state is one of `state` (a single state
    or a tuple/list of acceptable states)."""
    expected = state if isinstance(state, (tuple, list)) else (state, )
    def callback(last_try):
        try:
            current = self.__getNodeState(node_type, uuid)
        except NotReadyException:
            current = None
        return current in expected, current
    self.expectCondition(callback, *args, **kw)
def expectMasterState(self, uuid, state, *args, **kw):
    """Wait for the given master node to reach `state`."""
    self.__expectNodeState(NodeTypes.MASTER, uuid, state, *args, **kw)
def expectStorageState(self, uuid, state, *args, **kw):
    """Wait for the given storage node to reach `state`."""
    self.__expectNodeState(NodeTypes.STORAGE, uuid, state, *args, **kw)
def expectRunning(self, process, *args, **kw):
    """Wait for the storage `process` to be seen RUNNING."""
    self.expectStorageState(
        process.getUUID(), NodeStates.RUNNING, *args, **kw)
def expectPending(self, process, *args, **kw):
    """Wait for the storage `process` to be seen PENDING."""
    self.expectStorageState(
        process.getUUID(), NodeStates.PENDING, *args, **kw)
def expectUnknown(self, process, *args, **kw):
    """Wait for the storage `process` to be seen UNKNOWN."""
    self.expectStorageState(
        process.getUUID(), NodeStates.UNKNOWN, *args, **kw)
def expectUnavailable(self, process, *args, **kw):
    """Wait for the storage `process` to be seen TEMPORARILY_DOWN."""
    self.expectStorageState(
        process.getUUID(), NodeStates.TEMPORARILY_DOWN, *args, **kw)
def expectPrimary(self, uuid=None, *args, **kw):
    """Wait until a primary master is elected; when `uuid` is given,
    additionally require the primary to be that node and fail fast if
    a different one shows up.

    Fix: replace the Python-2-only statement form ``raise E, msg`` with
    the call form ``raise E(msg)``, valid in both Python 2 and 3.
    """
    def callback(last_try):
        current_try = self.getPrimary()
        if None not in (uuid, current_try) and uuid != current_try:
            raise AssertionError('An unexpected primary arised: %r, '
                                 'expected %r'
                                 % (dump(current_try), dump(uuid)))
        return uuid is None or uuid == current_try, current_try
    self.expectCondition(callback, *args, **kw)
def expectOudatedCells(self, number, *args, **kw):
    """Wait until exactly `number` partition cells are OUT_OF_DATE.

    (sic: 'Oudated' — name kept for compatibility with callers.)
    """
    def callback(last_try):
        row_list = self.neoctl.getPartitionRowList()[1]
        # count OUT_OF_DATE cells over the whole partition table
        outdated = sum(cell[1] == CellStates.OUT_OF_DATE
                       for row in row_list
                       for cell in row[1])
        return outdated == number, outdated
    self.expectCondition(callback, *args, **kw)
def expectAssignedCells(self, process, number, *args, **kw):
    """Wait until `number` cells are assigned to the given storage."""
    uuid = process.getUUID()
    def callback(last_try):
        row_list = self.neoctl.getPartitionRowList()[1]
        assigned = sum(cell[0] == uuid
                       for row in row_list
                       for cell in row[1])
        return assigned == number, assigned
    self.expectCondition(callback, *args, **kw)
def expectClusterState(self, state, *args, **kw):
    """Wait until the cluster reports the given state."""
    def callback(last_try):
        try:
            current = self.neoctl.getClusterState()
        except NotReadyException:
            current = None
        return current == state, current
    self.expectCondition(callback, *args, **kw)
def expectClusterRecovering(self, *args, **kw):
    """Wait for the cluster to be in RECOVERING state."""
    self.expectClusterState(ClusterStates.RECOVERING, *args, **kw)
def expectClusterVerifying(self, *args, **kw):
    """Wait for the cluster to be in VERIFYING state."""
    self.expectClusterState(ClusterStates.VERIFYING, *args, **kw)
def expectClusterRunning(self, *args, **kw):
    """Wait for the cluster to be in RUNNING state."""
    self.expectClusterState(ClusterStates.RUNNING, *args, **kw)
def expectAlive(self, process, *args, **kw):
    """Wait until the given process is alive."""
    def callback(last_try):
        alive = process.isAlive()
        return alive, alive
    self.expectCondition(callback, *args, **kw)
def expectDead(self, process, *args, **kw):
    """Wait until the given process is no longer alive."""
    def callback(last_try):
        dead = not process.isAlive()
        return dead, dead
    self.expectCondition(callback, *args, **kw)
def expectStorageNotKnown(self, process, *args, **kw):
    """Wait until the given storage is absent from the node table.

    /!\ "Not Known" is different from the UNKNOWN node state.
    """
    uuid = process.getUUID()
    def callback(last_try):
        for storage in self.getStorageList():
            if storage[2] == uuid:
                # still listed: report the offending entry
                return False, storage
        return True, None
    self.expectCondition(callback, *args, **kw)
def __del__(self):
    # Best-effort cleanup when the cluster object is garbage-collected:
    # close the neoctl connection first, then remove the (expected to be
    # empty) temporary directory tree if cleanup was requested.
    # NOTE(review): os.removedirs raises if a directory is non-empty;
    # any exception here is only printed by the interpreter — confirm
    # that is the intended best-effort behavior.
    self.neoctl.close()
    if self.cleanup_on_delete:
        os.removedirs(self.temp_dir)
class NEOFunctionalTest(NeoTestBase):
    """Base class for NEO functional tests.

    Provides a per-test temporary directory, log setup inside it, and a
    helper running a callable with a timeout in a daemon thread.
    """

    def setupLog(self):
        # log into the per-test temporary directory
        logging.setup(os.path.join(self.getTempDirectory(), 'test.log'))

    def getTempDirectory(self):
        # build the full path based on test case and current test method
        temp_dir = os.path.join(getTempDirectory(), self.id())
        # build the path if needed
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        return temp_dir

    def runWithTimeout(self, timeout, method, args=(), kwargs=None):
        """Run `method(*args, **kwargs)` in a daemon thread and fail the
        test if it does not finish within `timeout` seconds.  Any
        exception raised by `method` is re-raised here with its
        original traceback."""
        if kwargs is None:
            kwargs = {}
        exc_list = []
        def excWrapper(*args, **kw):
            try:
                method(*args, **kw)
            except:
                # capture everything to re-raise in the test thread
                exc_list.append(sys.exc_info())
        thread = threading.Thread(None, excWrapper, args=args, kwargs=kwargs)
        thread.daemon = True
        thread.start()
        thread.join(timeout)
        self.assertFalse(thread.is_alive(), 'Run timeout')
        if exc_list:
            assert len(exc_list) == 1, exc_list
            exc = exc_list[0]
            # Python 2 three-argument raise: preserve original traceback
            raise exc[0], exc[1], exc[2]
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/functional/testClient.py 0000664 0000000 0000000 00000030227 12601037530 0027655 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import os
import unittest
import transaction
import ZODB
import socket
from struct import pack
from neo.lib.util import makeChecksum, u64
from ZODB.FileStorage import FileStorage
from ZODB.POSException import ConflictError
from ZODB.tests.StorageTestBase import zodb_pickle
from persistent import Persistent
from .. import expectedFailure
from . import NEOCluster, NEOFunctionalTest
TREE_SIZE = 6  # depth of the binary Tree fixture built by these tests
class Tree(Persistent):
    """A simple binary tree of the given depth.

    A node of depth <= 0 is a leaf; otherwise it grows `right` and
    `left` subtrees of depth - 1.
    """
    def __init__(self, depth):
        self.depth = depth
        if depth > 0:
            depth -= 1
            self.right = Tree(depth)
            self.left = Tree(depth)
# simple persistent counter; conflict resolution is added in a subclass
class PCounter(Persistent):
    """Persistent counter without conflict resolution."""
    _value = 0

    def value(self):
        """Current counter value."""
        return self._value

    def inc(self):
        """Increment the counter by one."""
        self._value += 1
class PCounterWithResolution(PCounter):
    """Counter whose concurrent commits are merged at conflict time."""

    def _p_resolveConflict(self, old, saved, new):
        # NOTE(review): the merge ignores `old` and simply adds both
        # values, which is only correct when the common base is 0 —
        # that is what the tests rely on.
        new['_value'] += saved['_value']
        return new
class PObject(Persistent):
    """Minimal persistent payload object used as store() data."""
class ClientTests(NEOFunctionalTest):
    """Functional tests exercising NEO through a real ZODB client."""

    def setUp(self):
        NEOFunctionalTest.setUp(self)
        # 4 storages, 3 partitions, 2 replicas, single master
        self.neo = NEOCluster(
            ['test_neo1', 'test_neo2', 'test_neo3', 'test_neo4'],
            partitions=3,
            replicas=2,
            master_count=1,
            temp_dir=self.getTempDirectory()
        )

    def _tearDown(self, success):
        self.neo.stop()
        del self.neo
        NEOFunctionalTest._tearDown(self, success)

    def __setup(self):
        # start cluster and open a ZODB database on top of it
        self.neo.setupDB()
        self.neo.start()
        self.neo.expectClusterRunning()
        self.db = ZODB.DB(self.neo.getZODBStorage())

    def makeTransaction(self):
        # create a transaction manager and a connection bound to it
        txn = transaction.TransactionManager()
        conn = self.db.open(transaction_manager=txn)
        return (txn, conn)

    def testConflictResolutionTriggered1(self):
        """ Check that ConflictError is raised on write conflict """
        # create the initial objects
        self.__setup()
        t, c = self.makeTransaction()
        c.root()['without_resolution'] = PCounter()
        t.commit()
        # first with no conflict resolution
        t1, c1 = self.makeTransaction()
        t2, c2 = self.makeTransaction()
        o1 = c1.root()['without_resolution']
        o2 = c2.root()['without_resolution']
        self.assertEqual(o1.value(), 0)
        self.assertEqual(o2.value(), 0)
        o1.inc()
        o2.inc()
        o2.inc()
        # t1 commits first, so t2 is the conflicting one
        t1.commit()
        self.assertEqual(o1.value(), 1)
        self.assertEqual(o2.value(), 2)
        self.assertRaises(ConflictError, t2.commit)

    def testIsolationAtZopeLevel(self):
        """ Check transaction isolation within zope connection """
        self.__setup()
        t, c = self.makeTransaction()
        root = c.root()
        root['item'] = 0
        root['other'] = 'bla'
        t.commit()
        t1, c1 = self.makeTransaction()
        t2, c2 = self.makeTransaction()
        # Makes c2 take a snapshot of database state
        c2.root()['other']
        c1.root()['item'] = 1
        t1.commit()
        # load object from zope cache
        self.assertEqual(c1.root()['item'], 1)
        # c2 must still see the pre-commit value
        self.assertEqual(c2.root()['item'], 0)

    def testIsolationWithoutZopeCache(self):
        """ Check isolation with zope cache cleared """
        self.__setup()
        t, c = self.makeTransaction()
        root = c.root()
        root['item'] = 0
        root['other'] = 'bla'
        t.commit()
        t1, c1 = self.makeTransaction()
        t2, c2 = self.makeTransaction()
        # Makes c2 take a snapshot of database state
        c2.root()['other']
        c1.root()['item'] = 1
        t1.commit()
        # clear zope cache to force re-ask NEO
        c1.cacheMinimize()
        c2.cacheMinimize()
        self.assertEqual(c1.root()['item'], 1)
        self.assertEqual(c2.root()['item'], 0)

    def __checkTree(self, tree, depth=TREE_SIZE):
        # recursively verify the Tree fixture down to the leaves
        self.assertTrue(isinstance(tree, Tree))
        self.assertEqual(depth, tree.depth)
        depth -= 1
        if depth <= 0:
            return
        self.__checkTree(tree.right, depth)
        self.__checkTree(tree.left, depth)

    def __getDataFS(self, reset=False):
        # open (optionally recreating) a FileStorage in the temp dir
        name = os.path.join(self.getTempDirectory(), 'data.fs')
        if reset and os.path.exists(name):
            os.remove(name)
        return FileStorage(file_name=name)

    def __populate(self, db, tree_size=TREE_SIZE):
        # Fill the database with a Tree and a sequence of undos so that
        # the history contains data-backpointers to exercise import/export.
        if isinstance(db.storage, FileStorage):
            # FileStorage.undo expects base64-encoded transaction ids
            from base64 import b64encode as undo_tid
        else:
            undo_tid = lambda x: x
        def undo(tid=None):
            db.undo(undo_tid(tid or db.lastTransaction()))
            transaction.commit()
        conn = db.open()
        root = conn.root()
        root['trees'] = Tree(tree_size)
        ob = root['trees'].right
        left = ob.left
        del ob.left
        transaction.commit()
        ob._p_changed = 1
        transaction.commit()
        t2 = db.lastTransaction()
        ob.left = left
        transaction.commit()
        undo()
        t4 = db.lastTransaction()
        undo(t2)
        undo()
        undo(t4)
        undo()
        undo()
        conn.close()

    def testImport(self):
        # source database
        dfs_storage = self.__getDataFS()
        dfs_db = ZODB.DB(dfs_storage)
        self.__populate(dfs_db)
        # create a neo storage
        self.neo.start()
        neo_storage = self.neo.getZODBStorage()
        # copy data fs to neo
        neo_storage.copyTransactionsFrom(dfs_storage, verbose=0)
        dfs_db.close()
        # check neo content
        (neo_db, neo_conn) = self.neo.getZODBConnection()
        self.__checkTree(neo_conn.root()['trees'])

    def __dump(self, storage):
        # Map tid -> [(oid, backpointer tid, data checksum)] for every
        # transaction, for whole-history comparison between storages.
        return {u64(t.tid): [(u64(o.oid), o.data_txn and u64(o.data_txn),
                              None if o.data is None else makeChecksum(o.data))
                             for o in t]
                for t in storage.iterator()}

    def testExport(self):
        # create a neo storage
        self.neo.start()
        (neo_db, neo_conn) = self.neo.getZODBConnection()
        self.__populate(neo_db)
        dump = self.__dump(neo_db.storage)
        # copy neo to data fs
        dfs_storage = self.__getDataFS(reset=True)
        neo_storage = self.neo.getZODBStorage()
        dfs_storage.copyTransactionsFrom(neo_storage)
        # check data fs content
        dfs_db = ZODB.DB(dfs_storage)
        root = dfs_db.open().root()
        self.__checkTree(root['trees'])
        dfs_db.close()
        # re-import through the importer backend and compare histories
        self.neo.stop()
        # NOTE(review): the storage configuration string below looks
        # truncated by extraction — verify the importer section markup
        # against the repository.
        self.neo = NEOCluster(db_list=['test_neo1'], partitions=3,
                              importer=[("root", {
                                  "storage": "\npath %s\n"
                                  % dfs_storage.getName()})],
                              temp_dir=self.getTempDirectory())
        self.neo.start()
        neo_db, neo_conn = self.neo.getZODBConnection()
        self.__checkTree(neo_conn.root()['trees'])
        self.assertEqual(dump, self.__dump(neo_db.storage))

    def testLockTimeout(self):
        """ Hold a lock on an object to block a second transaction """
        def test():
            self.neo = NEOCluster(['test_neo1'], replicas=0,
                                  temp_dir=self.getTempDirectory())
            self.neo.start()
            # BUG: The following 2 lines creates 2 app, i.e. 2 TCP connections
            # to the storage, so there may be a race condition at network
            # level and 'st2.store' may be effective before 'st1.store'.
            db1, conn1 = self.neo.getZODBConnection()
            db2, conn2 = self.neo.getZODBConnection()
            st1, st2 = conn1._storage, conn2._storage
            t1, t2 = transaction.Transaction(), transaction.Transaction()
            t1.user = t2.user = 'user'
            t1.description = t2.description = 'desc'
            oid = st1.new_oid()
            rev = '\0' * 8
            data = zodb_pickle(PObject())
            st2.tpc_begin(t2)
            st1.tpc_begin(t1)
            st1.store(oid, rev, data, '', t1)
            # this store will be delayed
            st2.store(oid, rev, data, '', t2)
            # the vote will timeout as t1 never release the lock
            self.assertRaises(ConflictError, st2.tpc_vote, t2)
        self.runWithTimeout(40, test)

    def testIPv6Client(self):
        """ Test the connectivity of an IPv6 connection for neo client """
        def test():
            """
            Implement the IPv6Client test
            """
            self.neo = NEOCluster(['test_neo1'], replicas=0,
                                  temp_dir = self.getTempDirectory(),
                                  address_type = socket.AF_INET6
                                  )
            self.neo.start()
            db1, conn1 = self.neo.getZODBConnection()
            db2, conn2 = self.neo.getZODBConnection()
        self.runWithTimeout(40, test)

    def testDelayedLocksCancelled(self):
        """
        Hold a lock on an object, try to get another lock on the same
        object to delay it. Then cancel the second transaction and check
        that the lock is not hold when the first transaction ends
        """
        def test():
            self.neo = NEOCluster(['test_neo1'], replicas=0,
                                  temp_dir=self.getTempDirectory())
            self.neo.start()
            db1, conn1 = self.neo.getZODBConnection()
            db2, conn2 = self.neo.getZODBConnection()
            st1, st2 = conn1._storage, conn2._storage
            t1, t2 = transaction.Transaction(), transaction.Transaction()
            t1.user = t2.user = 'user'
            t1.description = t2.description = 'desc'
            oid = st1.new_oid()
            rev = '\0' * 8
            data = zodb_pickle(PObject())
            st1.tpc_begin(t1)
            st2.tpc_begin(t2)
            # t1 own the lock
            st1.store(oid, rev, data, '', t1)
            # t2 store is delayed
            st2.store(oid, rev, data, '', t2)
            # cancel t2, should cancel the store too
            st2.tpc_abort(t2)
            # finish t1, should release the lock
            st1.tpc_vote(t1)
            st1.tpc_finish(t1)
            db3, conn3 = self.neo.getZODBConnection()
            st3 = conn3._storage
            t3 = transaction.Transaction()
            t3.user = 'user'
            t3.description = 'desc'
            st3.tpc_begin(t3)
            # retrieve the last revision
            data, serial = st3.load(oid, '')
            # try to store again, should not be delayed
            st3.store(oid, serial, data, '', t3)
            # the vote should not timeout
            st3.tpc_vote(t3)
            st3.tpc_finish(t3)
        self.runWithTimeout(10, test)

    def testGreaterOIDSaved(self):
        """
        Store an object with an OID greater than the last generated by the
        master. This OID must be intercepted at commit, used for next OID
        generations and persistently saved on storage nodes.
        """
        self.neo.start()
        db1, conn1 = self.neo.getZODBConnection()
        st1 = conn1._storage
        t1 = transaction.Transaction()
        rev = '\0' * 8
        data = zodb_pickle(PObject())
        my_oid = pack('!Q', 100000)
        # store an object with this OID
        st1.tpc_begin(t1)
        st1.store(my_oid, rev, data, '', t1)
        st1.tpc_vote(t1)
        st1.tpc_finish(t1)
        # request an oid, should be greater than mine
        oid = st1.new_oid()
        self.assertTrue(oid > my_oid)
def test_suite():
    """Collect this module's tests for the project test runner."""
    suite = unittest.makeSuite(ClientTests)
    return suite

if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/functional/testCluster.py 0000664 0000000 0000000 00000014751 12601037530 0030064 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
import transaction
from neo.lib.protocol import NodeStates
from . import NEOCluster, NEOFunctionalTest
class ClusterTests(NEOFunctionalTest):
    """Functional tests of whole-cluster lifecycle (startup, recovery,
    breakage) against real NEO processes."""

    def _tearDown(self, success):
        if hasattr(self, "neo"):
            self.neo.stop()
            del self.neo
        NEOFunctionalTest._tearDown(self, success)

    def testClusterStartup(self):
        neo = self.neo = NEOCluster(['test_neo1', 'test_neo2'], replicas=1,
                                    temp_dir=self.getTempDirectory())
        neoctl = neo.neoctl
        neo.run()
        # Running a new cluster doesn't exit Recovery state.
        s1, s2 = neo.getStorageProcessList()
        neo.expectPending(s1)
        neo.expectPending(s2)
        neo.expectClusterRecovering()
        # When allowing cluster to exit Recovery, it reaches Running state and
        # all present storage nodes reach running state.
        neoctl.startCluster()
        neo.expectRunning(s1)
        neo.expectRunning(s2)
        neo.expectClusterRunning()
        # Re-running cluster with a missing storage doesn't exit Recovery
        # state.
        neo.stop()
        neo.run(except_storages=(s2, ))
        neo.expectPending(s1)
        neo.expectUnknown(s2)
        neo.expectClusterRecovering()
        # Starting missing storage allows cluster to exit Recovery without
        # neoctl action.
        s2.start()
        neo.expectRunning(s1)
        neo.expectRunning(s2)
        neo.expectClusterRunning()
        # Re-running cluster with a missing storage and allowing startup exits
        # recovery.
        neo.stop()
        neo.run(except_storages=(s2, ))
        neo.expectPending(s1)
        neo.expectUnknown(s2)
        neo.expectClusterRecovering()
        neoctl.startCluster()
        neo.expectRunning(s1)
        neo.expectUnknown(s2)
        neo.expectClusterRunning()

    def testClusterBreaks(self):
        # single storage, no replica: killing it leaves the cluster verifying
        self.neo = NEOCluster(['test_neo1'],
                              master_count=1, temp_dir=self.getTempDirectory())
        self.neo.setupDB()
        self.neo.start()
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
        self.neo.killStorage()
        self.neo.expectClusterVerifying()

    def testClusterBreaksWithTwoNodes(self):
        # two storages but no replica: losing one breaks the cluster
        self.neo = NEOCluster(['test_neo1', 'test_neo2'],
                              partitions=2, master_count=1, replicas=0,
                              temp_dir=self.getTempDirectory())
        self.neo.setupDB()
        self.neo.start()
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
        self.neo.killStorage()
        self.neo.expectClusterVerifying()

    def testClusterDoesntBreakWithTwoNodesOneReplica(self):
        # with one replica, losing one of two storages keeps it running
        self.neo = NEOCluster(['test_neo1', 'test_neo2'],
                              partitions=2, replicas=1, master_count=1,
                              temp_dir=self.getTempDirectory())
        self.neo.setupDB()
        self.neo.start()
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
        self.neo.killStorage()
        self.neo.expectClusterRunning()

    def testElectionWithManyMasters(self):
        MASTER_COUNT = 20
        self.neo = NEOCluster(['test_neo1', 'test_neo2'],
                              partitions=10, replicas=0, master_count=MASTER_COUNT,
                              temp_dir=self.getTempDirectory())
        self.neo.start()
        self.neo.expectClusterRunning()
        self.neo.expectAllMasters(MASTER_COUNT, NodeStates.RUNNING)
        self.neo.expectOudatedCells(0)

    def testLeavingOperationalStateDropClientNodes(self):
        """
        Check that client nodes are dropped where the cluster leaves the
        operational state.
        """
        # start a cluster
        self.neo = NEOCluster(['test_neo1'], replicas=0,
                              temp_dir=self.getTempDirectory())
        self.neo.start()
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(0)
        # connect a client a check it's known
        db, conn = self.neo.getZODBConnection()
        self.assertEqual(len(self.neo.getClientlist()), 1)
        # drop the storage, the cluster is no more operational...
        self.neo.getStorageProcessList()[0].stop()
        self.neo.expectClusterVerifying()
        # ...and the client gets disconnected
        self.assertEqual(len(self.neo.getClientlist()), 0)
        # restart storage so that the cluster is operational again
        self.neo.getStorageProcessList()[0].start()
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(0)
        # and reconnect the client, there must be only one known by the admin
        conn.root()['plop'] = 1
        transaction.commit()
        self.assertEqual(len(self.neo.getClientlist()), 1)

    def testStorageLostDuringRecovery(self):
        """
        Check that admin node receive notifications of storage
        connection and disconnection during recovery
        """
        self.neo = NEOCluster(['test_neo%d' % i for i in xrange(2)],
                              master_count=1, partitions=10, replicas=1,
                              temp_dir=self.getTempDirectory(), clear_databases=True,
                              )
        storages = self.neo.getStorageProcessList()
        self.neo.run(except_storages=storages)
        self.neo.expectStorageNotKnown(storages[0])
        self.neo.expectStorageNotKnown(storages[1])
        storages[0].start()
        self.neo.expectPending(storages[0])
        self.neo.expectStorageNotKnown(storages[1])
        storages[1].start()
        self.neo.expectPending(storages[0])
        self.neo.expectPending(storages[1])
        storages[0].stop()
        self.neo.expectUnavailable(storages[0])
        self.neo.expectPending(storages[1])
        storages[1].stop()
        self.neo.expectUnavailable(storages[0])
        self.neo.expectUnavailable(storages[1])
def test_suite():
    """Collect this module's tests for the project test runner."""
    suite = unittest.makeSuite(ClusterTests)
    return suite

if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/functional/testMaster.py 0000664 0000000 0000000 00000012545 12601037530 0027675 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from . import NEOCluster, NEOFunctionalTest
from neo.lib.protocol import NodeStates
MASTER_NODE_COUNT = 3  # number of master processes started per test
class MasterTests(NEOFunctionalTest):
    """Functional tests of master election and fail-over with
    MASTER_NODE_COUNT master processes and no storage."""

    def setUp(self):
        NEOFunctionalTest.setUp(self)
        self.neo = NEOCluster([], master_count=MASTER_NODE_COUNT,
                              temp_dir=self.getTempDirectory())
        self.neo.stop()
        self.neo.run()

    def _tearDown(self, success):
        self.neo.stop()
        NEOFunctionalTest._tearDown(self, success)

    def testStoppingSecondaryMaster(self):
        # Wait for masters to stabilize
        self.neo.expectAllMasters(MASTER_NODE_COUNT, NodeStates.RUNNING)
        # Kill
        neoctl = self.neo.neoctl
        primary_uuid = neoctl.getPrimary()
        # pick any master that is not the primary (relies on the loop
        # variable surviving the break)
        for master in self.neo.getMasterProcessList():
            uuid = master.getUUID()
            if uuid != primary_uuid:
                break
        neoctl.killNode(uuid)
        self.neo.expectDead(master)
        # killing the primary through neoctl must be refused
        self.assertRaises(RuntimeError, neoctl.killNode, primary_uuid)

    def testStoppingPrimaryWithTwoSecondaries(self):
        # Wait for masters to stabilize
        self.neo.expectAllMasters(MASTER_NODE_COUNT)
        # Kill
        killed_uuid_list = self.neo.killPrimary()
        # Test sanity check.
        self.assertEqual(len(killed_uuid_list), 1)
        uuid = killed_uuid_list[0]
        # Check the state of the primary we just killed
        self.neo.expectMasterState(uuid, (None, NodeStates.UNKNOWN))
        # BUG: The following check expects neoctl to reconnect before
        # the election finishes.
        self.assertEqual(self.neo.getPrimary(), None)
        # Check that a primary master arised.
        self.neo.expectPrimary(timeout=10)
        # Check that the uuid really changed.
        new_uuid = self.neo.getPrimary()
        self.assertNotEqual(new_uuid, uuid)

    def testStoppingPrimaryWithOneSecondary(self):
        self.neo.expectAllMasters(MASTER_NODE_COUNT,
                                  state=NodeStates.RUNNING)
        # Kill one secondary master.
        killed_uuid_list = self.neo.killSecondaryMaster()
        # Test sanity checks.
        self.assertEqual(len(killed_uuid_list), 1)
        self.neo.expectMasterState(killed_uuid_list[0], None)
        self.assertEqual(len(self.neo.getMasterList()), 2)
        uuid, = self.neo.killPrimary()
        # Check the state of the primary we just killed
        self.neo.expectMasterState(uuid, (None, NodeStates.UNKNOWN))
        # Check that a primary master arised.
        self.neo.expectPrimary(timeout=10)
        # Check that the uuid really changed.
        self.assertNotEqual(self.neo.getPrimary(), uuid)

    def testMasterSequentialStart(self):
        self.neo.expectAllMasters(MASTER_NODE_COUNT,
                                  state=NodeStates.RUNNING)
        master_list = self.neo.getMasterProcessList()
        # Stop the cluster (so we can start processes manually)
        self.neo.killMasters()
        # Restart admin to make sure it knows all masters.
        admin, = self.neo.getAdminProcessList()
        admin.kill()
        admin.wait()
        admin.start()
        # Start the first master.
        first_master = master_list[0]
        first_master.start()
        first_master_uuid = first_master.getUUID()
        # Check that the master node we started elected itself.
        self.neo.expectPrimary(first_master_uuid, timeout=30)
        # Check that no other node is known as running.
        self.assertEqual(len(self.neo.getMasterList(
            state=NodeStates.RUNNING)), 1)
        # Start a second master.
        second_master = master_list[1]
        # Check that the second master is known as being down.
        self.assertEqual(self.neo.getMasterNodeState(second_master.getUUID()),
                         None)
        second_master.start()
        # Check that the second master is running under his known UUID.
        self.neo.expectMasterState(second_master.getUUID(),
                                   NodeStates.RUNNING)
        # Check that the primary master didn't change.
        self.assertEqual(self.neo.getPrimary(), first_master_uuid)
        # Start a third master.
        third_master = master_list[2]
        # Check that the third master is known as being down.
        self.assertEqual(self.neo.getMasterNodeState(third_master.getUUID()),
                         None)
        third_master.start()
        # Check that the third master is running under his known UUID.
        self.neo.expectMasterState(third_master.getUUID(),
                                   NodeStates.RUNNING)
        # Check that the primary master didn't change.
        self.assertEqual(self.neo.getPrimary(), first_master_uuid)
def test_suite():
    """Collect this module's tests for the project test runner."""
    suite = unittest.makeSuite(MasterTests)
    return suite

if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/functional/testStorage.py 0000664 0000000 0000000 00000044210 12601037530 0030040 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import time
import unittest
import transaction
from persistent import Persistent
from . import NEOCluster, NEOFunctionalTest
from neo.lib.protocol import ClusterStates, NodeStates
from ZODB.tests.StorageTestBase import zodb_pickle
class PObject(Persistent):
    """Persistent object carrying a single comparable `value`."""

    def __init__(self, value):
        self.value = value
OBJECT_NUMBER = 100  # number of PObject instances committed by __populate
class StorageTests(NEOFunctionalTest):
def _tearDown(self, success):
    """Stop and drop the cluster (if one was created) before the
    generic teardown."""
    neo = getattr(self, "neo", None)
    if neo is not None:
        neo.stop()
        del self.neo
    NEOFunctionalTest._tearDown(self, success)
def __setup(self, storage_number=2, pending_number=0, replicas=1,
            partitions=10, master_count=2):
    """Create and start a cluster, keeping `pending_number` storage
    processes stopped.

    Returns a (started_processes, stopped_processes) tuple.
    """
    # create a neo cluster
    self.neo = NEOCluster(
        ['test_neo%d' % i for i in xrange(storage_number)],
        master_count=master_count,
        partitions=partitions, replicas=replicas,
        temp_dir=self.getTempDirectory(),
        clear_databases=True,
    )
    # too many pending storage nodes requested
    assert pending_number <= storage_number
    processes = self.neo.getStorageProcessList()
    cut = len(processes) - pending_number
    started, stopped = processes[:cut], processes[cut:]
    self.neo.start(except_storages=stopped)
    return (started, stopped)
def __populate(self):
    """Commit OBJECT_NUMBER PObject instances under the root."""
    db, conn = self.neo.getZODBConnection()
    root = conn.root()
    for index in xrange(OBJECT_NUMBER):
        root[index] = PObject(index)
    transaction.commit()
    conn.close()
    db.close()
def __checkDatabase(self, db_name):
    # Verify one storage database: first at SQL level (row counts in
    # obj/tobj), then through a ZODB connection (object values).
    db = self.neo.getSQLConnection(db_name)
    # wait for the sql transaction to be committed
    def callback(last_try):
        # One revision per object and two for the root, before and after
        (object_number,), = db.query('SELECT count(*) FROM obj')
        return object_number == OBJECT_NUMBER + 2, object_number
    self.neo.expectCondition(callback)
    # no more temporary objects
    (t_objects,), = db.query('SELECT count(*) FROM tobj')
    self.assertEqual(t_objects, 0)
    # One object more for the root
    query = 'SELECT count(*) FROM (SELECT * FROM obj GROUP BY oid) AS t'
    (objects,), = db.query(query)
    self.assertEqual(objects, OBJECT_NUMBER + 1)
    # Check object content
    db, conn = self.neo.getZODBConnection()
    root = conn.root()
    for i in xrange(OBJECT_NUMBER):
        obj = root[i]
        self.assertEqual(obj.value, i)
    transaction.abort()
    conn.close()
    db.close()
def __checkReplicationDone(self):
    """Wait for replication to finish, then verify every database and
    that both storages are RUNNING."""
    def expect_all_storages(last_try):
        count = len(self.neo.getStorageList())
        return count == len(self.neo.db_list), count
    self.neo.expectCondition(expect_all_storages, timeout=10)
    self.neo.expectOudatedCells(number=0, timeout=10)
    # check databases
    for db_name in self.neo.db_list:
        self.__checkDatabase(db_name)
    # check storages state
    running = self.neo.getStorageList(NodeStates.RUNNING)
    self.assertEqual(len(running), 2)
def testNewNodesInPendingState(self):
    """ Check that new storage nodes are set as pending, the cluster remains
    running """
    # start with the first storage
    processes = self.__setup(storage_number=3, replicas=1, pending_number=2)
    started, stopped = processes
    self.neo.expectRunning(started[0])
    self.neo.expectClusterRunning()
    # start the second then the third; both stay PENDING
    stopped[0].start()
    self.neo.expectPending(stopped[0])
    self.neo.expectClusterRunning()
    stopped[1].start()
    self.neo.expectPending(stopped[1])
    self.neo.expectClusterRunning()
def testReplicationWithNewStorage(self):
    """ create a cluster with one storage, populate it, add a new storage
    then check the database content to ensure the replication process is
    well done """
    # populate one storage
    processes = self.__setup(storage_number=2, replicas=1, pending_number=1,
                             partitions=10)
    started, stopped = processes
    self.neo.expectOudatedCells(number=0)
    self.__populate()
    self.neo.expectClusterRunning()
    self.neo.expectAssignedCells(started[0], number=10)
    # start the second
    stopped[0].start()
    self.neo.expectPending(stopped[0])
    self.neo.expectClusterRunning()
    # add it to the partition table
    self.neo.neoctl.enableStorageList([stopped[0].getUUID()])
    self.neo.expectRunning(stopped[0])
    self.neo.neoctl.tweakPartitionTable()
    self.neo.expectAssignedCells(stopped[0], number=10)
    self.neo.expectClusterRunning()
    # wait for replication to finish then check
    self.__checkReplicationDone()
    self.neo.expectClusterRunning()
def testOudatedCellsOnDownStorage(self):
    """ Check that the storage cells are set as oudated when the node is
    down, the cluster remains up since there is a replica """
    # populate the two storages
    started, _ = self.__setup(partitions=3, replicas=1, storage_number=3)
    self.neo.expectRunning(started[0])
    self.neo.expectRunning(started[1])
    self.neo.expectRunning(started[2])
    self.neo.expectOudatedCells(number=0)
    self.neo.neoctl.killNode(started[0].getUUID())
    # Cluster still operational. All cells of first storage should be
    # outdated.
    self.neo.expectUnavailable(started[0])
    self.neo.expectOudatedCells(2)
    self.neo.expectClusterRunning()
    # killing the last replica of a partition must be refused
    self.assertRaises(RuntimeError, self.neo.neoctl.killNode,
                      started[1].getUUID())
    started[1].stop()
    # Cluster not operational anymore. Only cells of second storage that
    # were shared with the third one should become outdated.
    self.neo.expectUnavailable(started[1])
    self.neo.expectClusterVerifying()
    self.neo.expectOudatedCells(3)
def testVerificationTriggered(self):
    """ Check that the verification stage is executed when a storage node
    required to be operationnal is lost, and the cluster come back in
    running state when the storage is up again """
    # start neo with one storage
    (started, _) = self.__setup(replicas=0, storage_number=1)
    self.neo.expectRunning(started[0])
    self.neo.expectOudatedCells(number=0)
    # add a client node
    db, conn = self.neo.getZODBConnection()
    # NOTE(review): chained assignment — `root` is bound to the string
    # 'ok', not to the root object; only the commit matters here.
    root = conn.root()['test'] = 'ok'
    transaction.commit()
    self.assertEqual(len(self.neo.getClientlist()), 1)
    # stop it, the cluster must switch to verification
    started[0].stop()
    self.neo.expectUnavailable(started[0])
    self.neo.expectClusterVerifying()
    # client must have been disconnected
    self.assertEqual(len(self.neo.getClientlist()), 0)
    conn.close()
    db.close()
    # restart it, the cluster must come back to running state
    started[0].start()
    self.neo.expectRunning(started[0])
    self.neo.expectClusterRunning()
    def testSequentialStorageKill(self):
        """ Check that the cluster remains running until the last storage node
        dies, when every partition is replicated on all storages. """
        # start neo with three storages / two replicas
        (started, _) = self.__setup(replicas=2, storage_number=3, partitions=10)
        self.neo.expectRunning(started[0])
        self.neo.expectRunning(started[1])
        self.neo.expectRunning(started[2])
        self.neo.expectOudatedCells(number=0)
        self.neo.expectClusterRunning()
        # stop one storage, cluster must remain running
        started[0].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectRunning(started[1])
        self.neo.expectRunning(started[2])
        # 10 partitions of the dead node become outdated
        self.neo.expectOudatedCells(number=10)
        self.neo.expectClusterRunning()
        # stop a second storage, cluster is still running
        started[1].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectUnavailable(started[1])
        self.neo.expectRunning(started[2])
        self.neo.expectOudatedCells(number=20)
        self.neo.expectClusterRunning()
        # stop the last, cluster dies and goes back to verification
        started[2].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectUnavailable(started[1])
        self.neo.expectUnavailable(started[2])
        self.neo.expectOudatedCells(number=20)
        self.neo.expectClusterVerifying()
    def testConflictingStorageRejected(self):
        """ Check that a storage coming after the recovery process with the
        same UUID as another already running one is refused. """
        # start with one storage
        (started, stopped) = self.__setup(storage_number=2, pending_number=1)
        self.neo.expectRunning(started[0])
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
        # start the second with the same UUID as the first
        stopped[0].setUUID(started[0].getUUID())
        stopped[0].start()
        self.neo.expectOudatedCells(number=0)
        # check the first and the cluster are still running
        self.neo.expectRunning(started[0])
        self.neo.expectClusterRunning()
        # XXX: should wait for the storage rejection
        # check that no node was added
        storage_number = len(self.neo.getStorageList())
        self.assertEqual(storage_number, 1)
    def testPartitionTableReorganizedWithNewStorage(self):
        """ Check that the partition table changes when adding a new storage
        to a cluster with one storage and no replicas. """
        # start with one storage and no replicas
        (started, stopped) = self.__setup(storage_number=2, pending_number=1,
                partitions=10, replicas=0)
        self.neo.expectRunning(started[0])
        self.neo.expectClusterRunning()
        # the single storage owns all 10 partitions
        self.neo.expectAssignedCells(started[0], 10)
        self.neo.expectOudatedCells(number=0)
        # start the second and add it to the partition table
        stopped[0].start()
        self.neo.expectPending(stopped[0])
        self.neo.neoctl.enableStorageList([stopped[0].getUUID()])
        self.neo.neoctl.tweakPartitionTable()
        self.neo.expectRunning(stopped[0])
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
        # the partition table must change, each node should be assigned to
        # five partitions
        self.neo.expectAssignedCells(started[0], 5)
        self.neo.expectAssignedCells(stopped[0], 5)
    def testPartitionTableReorganizedAfterDrop(self):
        """ Check that the partition table changes when dropping a replica
        from a cluster with two storages. """
        # start with two storages / one replica
        (started, stopped) = self.__setup(storage_number=2, replicas=1,
                partitions=10, pending_number=0)
        self.neo.expectRunning(started[0])
        self.neo.expectRunning(started[1])
        self.neo.expectOudatedCells(number=0)
        self.neo.expectAssignedCells(started[0], 10)
        self.neo.expectAssignedCells(started[1], 10)
        # kill one storage, it should be set as unavailable
        started[0].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectRunning(started[1])
        # and the partition table must not change
        self.neo.expectAssignedCells(started[0], 10)
        self.neo.expectAssignedCells(started[1], 10)
        # ask neoctl to drop it: its cells are released
        self.neo.neoctl.dropNode(started[0].getUUID())
        self.neo.expectStorageNotKnown(started[0])
        self.neo.expectAssignedCells(started[0], 0)
        self.neo.expectAssignedCells(started[1], 10)
        # dropping the last remaining storage must be refused
        self.assertRaises(RuntimeError, self.neo.neoctl.dropNode,
            started[1].getUUID())
        self.neo.expectClusterRunning()
    def testReplicationThenRunningWithReplicas(self):
        """ Add a replica to a cluster, wait for the replication to finish,
        shut down the first storage then check the new storage content. """
        # start with one storage
        (started, stopped) = self.__setup(storage_number=2, replicas=1,
                pending_number=1, partitions=10)
        self.neo.expectRunning(started[0])
        self.neo.expectStorageNotKnown(stopped[0])
        self.neo.expectOudatedCells(number=0)
        # populate the cluster with some data
        self.__populate()
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
        self.neo.expectAssignedCells(started[0], 10)
        self.__checkDatabase(self.neo.db_list[0])
        # add a second storage
        stopped[0].start()
        self.neo.expectPending(stopped[0])
        self.neo.neoctl.enableStorageList([stopped[0].getUUID()])
        self.neo.neoctl.tweakPartitionTable()
        self.neo.expectRunning(stopped[0])
        self.neo.expectClusterRunning()
        # with one replica, both storages hold every partition
        self.neo.expectAssignedCells(started[0], 10)
        self.neo.expectAssignedCells(stopped[0], 10)
        # wait for replication to finish
        self.neo.expectOudatedCells(number=0)
        self.neo.expectClusterRunning()
        self.__checkReplicationDone()
        # kill the first storage
        started[0].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectOudatedCells(number=10)
        self.neo.expectAssignedCells(started[0], 10)
        self.neo.expectAssignedCells(stopped[0], 10)
        self.neo.expectClusterRunning()
        self.__checkDatabase(self.neo.db_list[0])
        # drop it from the partition table
        self.neo.neoctl.dropNode(started[0].getUUID())
        self.neo.expectStorageNotKnown(started[0])
        self.neo.expectRunning(stopped[0])
        self.neo.expectAssignedCells(started[0], 0)
        self.neo.expectAssignedCells(stopped[0], 10)
        # the replica database must hold the same content
        self.__checkDatabase(self.neo.db_list[1])
    def testStartWithManyPartitions(self):
        """ Just tests that the cluster can start with more than 1000
        partitions. 1000, because currently there is an arbitrary packet
        split at every 1000 partitions when sending a partition table. """
        self.__setup(storage_number=2, partitions=5000, master_count=1)
        self.neo.expectClusterState(ClusterStates.RUNNING)
    def testRecoveryWithMultiplePT(self):
        """ Check recovery when the storages hold different versions of the
        partition table (one storage is missing at restart). """
        # start a cluster with 2 storages and a replica
        (started, stopped) = self.__setup(storage_number=2, replicas=1,
                pending_number=0, partitions=10)
        self.neo.expectRunning(started[0])
        self.neo.expectRunning(started[1])
        self.neo.expectOudatedCells(number=0)
        self.neo.expectClusterRunning()
        # drop the first then the second storage
        started[0].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectRunning(started[1])
        self.neo.expectOudatedCells(number=10)
        started[1].stop()
        self.neo.expectUnavailable(started[0])
        self.neo.expectUnavailable(started[1])
        self.neo.expectOudatedCells(number=10)
        self.neo.expectClusterVerifying()
        # XXX: need to sync with storages first
        self.neo.stop()
        # restart the cluster without the second storage
        self.neo.run(except_storages=[started[1]])
        self.neo.expectPending(started[0])
        self.neo.expectUnknown(started[1])
        self.neo.expectClusterRecovering()
        # Cluster doesn't know there are outdated cells
        self.neo.expectOudatedCells(number=0)
        # bring back the missing storage: the cluster completes recovery
        started[1].start()
        self.neo.expectRunning(started[0])
        self.neo.expectRunning(started[1])
        self.neo.expectClusterRunning()
        self.neo.expectOudatedCells(number=0)
    def testReplicationBlockedByUnfinished(self):
        """ Check that replication does not complete while a transaction is
        still unfinished, and resumes once it is committed. """
        # start a cluster with 1 of 2 storages and a replica
        (started, stopped) = self.__setup(storage_number=2, replicas=1,
                pending_number=1, partitions=10)
        self.neo.expectRunning(started[0])
        self.neo.expectStorageNotKnown(stopped[0])
        self.neo.expectOudatedCells(number=0)
        self.neo.expectClusterRunning()
        self.__populate()
        self.neo.expectOudatedCells(number=0)
        # start a transaction that will block the end of the replication
        db, conn = self.neo.getZODBConnection()
        st = conn._storage
        t = transaction.Transaction()
        t.user = 'user'
        t.description = 'desc'
        oid = st.new_oid()
        rev = '\0' * 8
        data = zodb_pickle(PObject(42))
        # begin and store, but do not vote/finish yet
        st.tpc_begin(t)
        st.store(oid, rev, data, '', t)
        # start the outdated storage
        stopped[0].start()
        self.neo.expectPending(stopped[0])
        self.neo.neoctl.enableStorageList([stopped[0].getUUID()])
        self.neo.neoctl.tweakPartitionTable()
        self.neo.expectRunning(stopped[0])
        self.neo.expectClusterRunning()
        self.neo.expectAssignedCells(started[0], 10)
        self.neo.expectAssignedCells(stopped[0], 10)
        # wait a bit, replication must not happen. This hack is required
        # because we cannot gather information directly from the storages
        time.sleep(10)
        self.neo.expectOudatedCells(number=10)
        # finish the transaction, the replication must happen and finish
        st.tpc_vote(t)
        st.tpc_finish(t)
        self.neo.expectOudatedCells(number=0, timeout=10)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/ 0000775 0000000 0000000 00000000000 12601037530 0024312 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0026411 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testClientHandler.py 0000664 0000000 0000000 00000021210 12601037530 0030274 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes, NodeStates, Packets
from neo.master.handlers.client import ClientServiceHandler
from neo.master.app import Application
class MasterClientHandlerTests(NeoUnitTestBase):
    """Unit tests for the master's ClientServiceHandler.

    A real master Application is built from a test configuration; its event
    manager is replaced by a Mock and fake connections simulate client and
    storage nodes.
    """

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        # create an application object
        config = self.getMasterConfiguration(master_number=1, replicas=1)
        self.app = Application(config)
        self.app.em.close()
        self.app.pt.clear()
        self.app.pt.setID(1)
        # replace the event manager so no real I/O happens
        self.app.em = Mock()
        self.app.loid = '\0' * 8
        self.app.tm.setLastTID('\0' * 8)
        self.service = ClientServiceHandler(self.app)
        # define some variables to simulate client and storage nodes
        self.client_port = 11022
        self.storage_port = 10021
        self.master_port = 10010
        self.master_address = ('127.0.0.1', self.master_port)
        self.client_address = ('127.0.0.1', self.client_port)
        self.storage_address = ('127.0.0.1', self.storage_port)
        self.storage_uuid = self.getStorageUUID()
        # register the storage
        self.app.nm.createStorage(
            uuid=self.storage_uuid,
            address=self.storage_address,
        )

    def identifyToMasterNode(self, node_type=NodeTypes.STORAGE, ip="127.0.0.1",
                             port=10021):
        """Register a node of the given type in the node manager and return
        its new UUID (first step of identification to the master node)."""
        # register the node itself
        uuid = self.getNewUUID(node_type)
        self.app.nm.createFromNodeType(
            node_type,
            address=(ip, port),
            uuid=uuid,
            state=NodeStates.RUNNING,
        )
        return uuid

    # Tests

    def test_07_askBeginTransaction(self):
        """askBeginTransaction must delegate to the transaction manager and
        answer with the TID it returns (or echo a client-supplied TID)."""
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        service = self.service
        tm_org = self.app.tm
        # mock the transaction manager to observe the begin() call
        self.app.tm = tm = Mock({
            'begin': '\x00\x00\x00\x00\x00\x00\x00\x01',
        })
        # client calls it without a TID
        client_uuid = self.identifyToMasterNode(node_type=NodeTypes.CLIENT, port=self.client_port)
        client_node = self.app.nm.getByUUID(client_uuid)
        conn = self.getFakeConnection(client_uuid, self.client_address)
        service.askBeginTransaction(conn, None)
        calls = tm.mockGetNamedCalls('begin')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(client_node, None)
        self.checkAnswerBeginTransaction(conn)
        # Client asks for a specific TID: the real tm must echo it back
        conn = self.getFakeConnection(client_uuid, self.client_address)
        self.app.tm = tm_org
        service.askBeginTransaction(conn, tid1)
        calls = tm.mockGetNamedCalls('begin')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(client_node, None)
        args = self.checkAnswerBeginTransaction(conn, decode=True)
        self.assertEqual(args, (tid1, ))

    def test_08_askNewOIDs(self):
        """askNewOIDs must advance the last OID known by the transaction
        manager."""
        service = self.service
        oid1, oid2 = self.getOID(1), self.getOID(2)
        self.app.tm.setLastOID(oid1)
        # client calls it
        client_uuid = self.identifyToMasterNode(node_type=NodeTypes.CLIENT, port=self.client_port)
        conn = self.getFakeConnection(client_uuid, self.client_address)
        # NOTE(review): the loop rebinds `conn`, so askNewOIDs below is
        # issued on the last storage connection rather than the client one
        # — looks unintended, but the test only checks the OID generator.
        for node in self.app.nm.getStorageList():
            conn = self.getFakeConnection(node.getUUID(), node.getAddress())
            node.setConnection(conn)
        service.askNewOIDs(conn, 1)
        self.assertTrue(self.app.tm.getLastOID() > oid1)

    def test_09_askFinishTransaction(self):
        """askFinishTransaction must send AskLockInformation only to storage
        nodes that are ready, and register the pending transaction."""
        service = self.service
        # set up a client and two storages
        client_uuid = self.identifyToMasterNode(node_type=NodeTypes.CLIENT, port=self.client_port)
        storage_uuid = self.storage_uuid
        storage_conn = self.getFakeConnection(storage_uuid, self.storage_address)
        storage2_uuid = self.identifyToMasterNode(port=10022)
        storage2_conn = self.getFakeConnection(storage2_uuid,
            (self.storage_address[0], self.storage_address[1] + 1))
        self.app.setStorageReady(storage2_uuid)
        conn = self.getFakeConnection(client_uuid, self.client_address)
        # mock the partition table so both storages hold the OIDs
        self.app.pt = Mock({
            'getPartition': 0,
            'getCellList': [
                Mock({'getUUID': storage_uuid}),
                Mock({'getUUID': storage2_uuid}),
            ],
            'getPartitions': 2,
        })
        ttid = self.getNextTID()
        service.askBeginTransaction(conn, ttid)
        oid_list = []
        conn = self.getFakeConnection(client_uuid, self.client_address)
        self.app.nm.getByUUID(storage_uuid).setConnection(storage_conn)
        # No packet sent if storage node is not ready
        self.assertFalse(self.app.isStorageReady(storage_uuid))
        service.askFinishTransaction(conn, ttid, oid_list)
        self.checkNoPacketSent(storage_conn)
        self.app.tm.abortFor(self.app.nm.getByUUID(client_uuid))
        # ...but AskLockInformation is sent if it is ready
        self.app.setStorageReady(storage_uuid)
        self.assertTrue(self.app.isStorageReady(storage_uuid))
        service.askFinishTransaction(conn, ttid, oid_list)
        self.checkAskLockInformation(storage_conn)
        self.assertEqual(len(self.app.tm.registerForNotification(storage_uuid)), 1)
        txn = self.app.tm[ttid]
        pending_ttid = list(self.app.tm.registerForNotification(storage_uuid))[0]
        self.assertEqual(ttid, pending_ttid)
        self.assertEqual(len(txn.getOIDList()), 0)
        self.assertEqual(len(txn.getUUIDList()), 1)

    def test_askNodeInformations(self):
        """Only information about master and storage nodes is sent to a
        client (not about other clients).
        NOTE(review): the test name says 'Informations' but it exercises
        askNodeInformation — verify against the handler API."""
        self.app.nm.createClient()
        conn = self.getFakeConnection()
        self.service.askNodeInformation(conn)
        calls = conn.mockGetNamedCalls('notify')
        self.assertEqual(len(calls), 1)
        packet = calls[0].getParam(0)
        (node_list, ) = packet.decode()
        # only the master and the storage, not the client itself
        self.assertEqual(len(node_list), 2)

    def test_connectionClosed(self):
        """Closing a client connection must forget the node without
        changing the partition table."""
        # give a client uuid which has unfinished transactions
        client_uuid = self.identifyToMasterNode(node_type=NodeTypes.CLIENT,
            port = self.client_port)
        conn = self.getFakeConnection(client_uuid, self.client_address)
        self.app.listening_conn = object() # mark as running
        lptid = self.app.pt.getID()
        self.assertEqual(self.app.nm.getByUUID(client_uuid).getState(),
                NodeStates.RUNNING)
        self.service.connectionClosed(conn)
        # node must have been removed, and no more transaction must remain
        self.assertEqual(self.app.nm.getByUUID(client_uuid), None)
        self.assertEqual(lptid, self.app.pt.getID())

    def test_askPack(self):
        """askPack must forward AskPack to every storage and immediately
        refuse a second pack while one is in progress."""
        self.assertEqual(self.app.packing, None)
        self.app.nm.createClient()
        tid = self.getNextTID()
        peer_id = 42
        conn = self.getFakeConnection(peer_id=peer_id)
        storage_uuid = self.storage_uuid
        storage_conn = self.getFakeConnection(storage_uuid,
            self.storage_address)
        self.app.nm.getByUUID(storage_uuid).setConnection(storage_conn)
        self.service.askPack(conn, tid)
        # nothing answered to the client yet, AskPack forwarded to storage
        self.checkNoPacketSent(conn)
        ptid = self.checkAskPacket(storage_conn, Packets.AskPack,
            decode=True)[0]
        self.assertEqual(ptid, tid)
        self.assertTrue(self.app.packing[0] is conn)
        self.assertEqual(self.app.packing[1], peer_id)
        self.assertEqual(self.app.packing[2], {storage_uuid})
        # Asking again to pack will cause an immediate error
        storage_uuid = self.identifyToMasterNode(port=10022)
        storage_conn = self.getFakeConnection(storage_uuid,
            self.storage_address)
        self.app.nm.getByUUID(storage_uuid).setConnection(storage_conn)
        self.service.askPack(conn, tid)
        self.checkNoPacketSent(storage_conn)
        status = self.checkAnswerPacket(conn, Packets.AnswerPack,
            decode=True)[0]
        self.assertFalse(status)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testElectionHandler.py 0000664 0000000 0000000 00000030666 12601037530 0030637 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from neo.lib import protocol
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes, NodeStates
from neo.master.handlers.election import ClientElectionHandler, \
ServerElectionHandler
from neo.master.app import Application
from neo.lib.exception import ElectionFailure
from neo.lib.connection import ClientConnection
# patch connection so that we can register _addPacket messages
# in mock object
def _addPacket(self, packet):
if self.connector is not None:
self.connector._addPacket(packet)
class MasterClientElectionTestBase(NeoUnitTestBase):
    """Shared helpers for master election tests.

    Subclasses are expected to set ``self.app`` (a master Application)
    before calling identifyToMasterNode.
    """

    def setUp(self):
        super(MasterClientElectionTestBase, self).setUp()
        # next port used to give each fake master a distinct address
        self._master_port = 3001

    def identifyToMasterNode(self):
        """Register a new master node with a unique address and return
        (node, fake connection bound to it)."""
        node = self.app.nm.createMaster(uuid=self.getMasterUUID())
        node.setAddress((self.local_ip, self._master_port))
        self._master_port += 1
        conn = self.getFakeConnection(
            uuid=node.getUUID(),
            address=node.getAddress(),
        )
        node.setConnection(conn)
        return (node, conn)
class MasterClientElectionTests(MasterClientElectionTestBase):
    """Tests for ClientElectionHandler (outgoing election connections)."""

    def setUp(self):
        super(MasterClientElectionTests, self).setUp()
        # create an application object
        config = self.getMasterConfiguration(master_number=1)
        self.app = Application(config)
        self.app.em.close()
        self.app.pt.clear()
        self.app.em = Mock()
        self.app.uuid = self.getMasterUUID()
        self.app.server = (self.local_ip, 10000)
        self.app.name = 'NEOCLUSTER'
        self.election = ClientElectionHandler(self.app)
        self.app.unconnected_master_node_set = set()
        self.app.negotiating_master_node_set = set()
        # apply monkey patches (reverted in _tearDown)
        ClientConnection._addPacket = _addPacket

    def _tearDown(self, success):
        # restore patched methods
        del ClientConnection._addPacket
        NeoUnitTestBase._tearDown(self, success)

    def _checkUnconnected(self, node):
        """Assert that *node* is not being negotiated with."""
        addr = node.getAddress()
        self.assertFalse(addr in self.app.negotiating_master_node_set)

    def test_connectionFailed(self):
        """A failed outgoing connection leaves the node unknown."""
        node, conn = self.identifyToMasterNode()
        self.assertTrue(node.isUnknown())
        self._checkUnconnected(node)
        self.election.connectionFailed(conn)
        self._checkUnconnected(node)
        self.assertTrue(node.isUnknown())

    def test_connectionCompleted(self):
        """A completed connection triggers a RequestIdentification packet."""
        node, conn = self.identifyToMasterNode()
        self.assertTrue(node.isUnknown())
        self._checkUnconnected(node)
        self.election.connectionCompleted(conn)
        self._checkUnconnected(node)
        self.assertTrue(node.isUnknown())
        self.checkRequestIdentification(conn)

    def _setNegociating(self, node):
        """Mark *node* as currently negotiating."""
        self._checkUnconnected(node)
        addr = node.getAddress()
        self.app.negotiating_master_node_set.add(addr)

    def test_connectionClosed(self):
        """Closing a negotiating connection removes the node from the
        negotiating set."""
        node, conn = self.identifyToMasterNode()
        self._setNegociating(node)
        self.election.connectionClosed(conn)
        self.assertTrue(node.isUnknown())
        addr = node.getAddress()
        self.assertFalse(addr in self.app.negotiating_master_node_set)

    def test_acceptIdentification1(self):
        """A non-master node accepting identification gets disconnected."""
        node, conn = self.identifyToMasterNode()
        args = (node.getUUID(), 0, 10, self.app.uuid, None,
            self._getMasterList())
        self.election.acceptIdentification(conn,
            NodeTypes.CLIENT, *args)
        self.assertFalse(node in self.app.negotiating_master_node_set)
        self.checkClosed(conn)

    def test_acceptIdentificationDoesNotKnowPrimary(self):
        """Peer without a primary: no primary is recorded locally."""
        master1, master1_conn = self.identifyToMasterNode()
        master1_uuid = master1.getUUID()
        self.election.acceptIdentification(
            master1_conn,
            NodeTypes.MASTER,
            master1_uuid,
            1,
            0,
            self.app.uuid,
            None,
            [(master1.getAddress(), master1_uuid)],
        )
        self.assertEqual(self.app.primary_master_node, None)

    def test_acceptIdentificationKnowsPrimary(self):
        """Peer announcing a primary: the primary is recorded locally."""
        master1, master1_conn = self.identifyToMasterNode()
        master1_uuid = master1.getUUID()
        primary1 = master1.getAddress()
        self.election.acceptIdentification(
            master1_conn,
            NodeTypes.MASTER,
            master1_uuid,
            1,
            0,
            self.app.uuid,
            primary1,
            [(master1.getAddress(), master1_uuid)],
        )
        self.assertNotEqual(self.app.primary_master_node, None)

    def test_acceptIdentificationMultiplePrimaries(self):
        """Two peers announcing different primaries raise ElectionFailure."""
        master1, master1_conn = self.identifyToMasterNode()
        master2, master2_conn = self.identifyToMasterNode()
        master3, _ = self.identifyToMasterNode()
        master1_uuid = master1.getUUID()
        master2_uuid = master2.getUUID()
        master3_uuid = master3.getUUID()
        primary1 = master1.getAddress()
        primary3 = master3.getAddress()
        master1_address = master1.getAddress()
        master2_address = master2.getAddress()
        master3_address = master3.getAddress()
        self.election.acceptIdentification(
            master1_conn,
            NodeTypes.MASTER,
            master1_uuid,
            1,
            0,
            self.app.uuid,
            primary1,
            [(master1_address, master1_uuid)],
        )
        self.assertRaises(ElectionFailure, self.election.acceptIdentification,
            master2_conn,
            NodeTypes.MASTER,
            master2_uuid,
            1,
            0,
            self.app.uuid,
            primary3,
            [
                (master1_address, master1_uuid),
                (master2_address, master2_uuid),
                (master3_address, master3_uuid),
            ],
        )

    def test_acceptIdentification3(self):
        """Identification accepted: UUID recorded, primary flag derived from
        the address comparison."""
        node, conn = self.identifyToMasterNode()
        args = (node.getUUID(), 0, 10, self.app.uuid, None,
            self._getMasterList())
        self.election.acceptIdentification(conn, NodeTypes.MASTER, *args)
        self.checkUUIDSet(conn, node.getUUID())
        self.assertEqual(self.app.primary is False,
                         self.app.server < node.getAddress())
        self.assertFalse(node in self.app.negotiating_master_node_set)

    def _getMasterList(self, with_node=None):
        """Return [(address, uuid)] for all known masters.
        NOTE(review): the with_node parameter is unused."""
        master_list = self.app.nm.getMasterList()
        return [(x.getAddress(), x.getUUID()) for x in master_list]
class MasterServerElectionTests(MasterClientElectionTestBase):
    """Tests for ServerElectionHandler (incoming election connections)."""

    def setUp(self):
        super(MasterServerElectionTests, self).setUp()
        # create an application object
        config = self.getMasterConfiguration(master_number=1)
        self.app = Application(config)
        self.app.em.close()
        self.app.pt.clear()
        self.app.name = 'NEOCLUSTER'
        self.app.em = Mock()
        self.election = ServerElectionHandler(self.app)
        self.app.unconnected_master_node_set = set()
        self.app.negotiating_master_node_set = set()
        for node in self.app.nm.getMasterList():
            node.setState(NodeStates.RUNNING)
        # define some variables to simulate client and storage nodes
        self.client_address = (self.local_ip, 1000)
        self.storage_address = (self.local_ip, 2000)
        self.master_address = (self.local_ip, 3000)
        # apply monkey patches (reverted in _tearDown)
        ClientConnection._addPacket = _addPacket

    def _tearDown(self, success):
        NeoUnitTestBase._tearDown(self, success)
        # restore environment
        del ClientConnection._addPacket

    def test_requestIdentification1(self):
        """A non-master node requesting identification is refused."""
        node, conn = self.identifyToMasterNode()
        args = (node.getUUID(), node.getAddress(), self.app.name)
        self.assertRaises(protocol.NotReadyError,
            self.election.requestIdentification,
            conn, NodeTypes.CLIENT, *args)

    def test_requestIdentification3(self):
        """A broken master node requesting identification is refused."""
        node, conn = self.identifyToMasterNode()
        node.setBroken()
        args = (node.getUUID(), node.getAddress(), self.app.name)
        self.assertRaises(protocol.BrokenNodeDisallowedError,
            self.election.requestIdentification,
            conn, NodeTypes.MASTER, *args)

    def test_requestIdentification4(self):
        """No conflict: a new UUID is allocated and identification is
        accepted."""
        node, conn = self.identifyToMasterNode()
        args = (node.getUUID(), node.getAddress(), self.app.name)
        self.election.requestIdentification(conn,
            NodeTypes.MASTER, *args)
        self.checkUUIDSet(conn, node.getUUID())
        args = self.checkAcceptIdentification(conn, decode=True)
        (node_type, uuid, partitions, replicas, new_uuid, primary_uuid,
            master_list) = args
        self.assertEqual(node.getUUID(), new_uuid)
        self.assertNotEqual(node.getUUID(), uuid)

    def _getNodeList(self):
        """Return all known nodes as tuples."""
        return [x.asTuple() for x in self.app.nm.getList()]

    def __getClient(self):
        """Register a client node and return a fake connection to it."""
        uuid = self.getClientUUID()
        conn = self.getFakeConnection(uuid=uuid, address=self.client_address)
        self.app.nm.createClient(uuid=uuid, address=self.client_address)
        return conn

    def __getMaster(self, port=1000, register=True):
        """Return a fake master connection, optionally registering the
        node in the node manager."""
        uuid = self.getMasterUUID()
        address = ('127.0.0.1', port)
        conn = self.getFakeConnection(uuid=uuid, address=address)
        if register:
            self.app.nm.createMaster(uuid=uuid, address=address)
        return conn

    def testRequestIdentification1(self):
        """Check with a non-master node, must be refused."""
        conn = self.__getClient()
        self.checkNotReadyErrorRaised(
            self.election.requestIdentification,
            conn=conn,
            node_type=NodeTypes.CLIENT,
            uuid=conn.getUUID(),
            address=conn.getAddress(),
            name=self.app.name
        )

    def _requestIdentification(self):
        """Run a full master identification round and return the primary
        address announced in the AcceptIdentification answer."""
        conn = self.getFakeConnection()
        peer_uuid = self.getMasterUUID()
        address = (self.local_ip, 2001)
        self.election.requestIdentification(
            conn,
            NodeTypes.MASTER,
            peer_uuid,
            address,
            self.app.name,
        )
        node_type, uuid, partitions, replicas, _peer_uuid, primary, \
            master_list = self.checkAcceptIdentification(conn, decode=True)
        self.assertEqual(node_type, NodeTypes.MASTER)
        self.assertEqual(uuid, self.app.uuid)
        self.assertEqual(partitions, self.app.pt.getPartitions())
        self.assertEqual(replicas, self.app.pt.getReplicas())
        self.assertTrue(address in [x[0] for x in master_list])
        self.assertTrue(self.app.server in [x[0] for x in master_list])
        self.assertEqual(peer_uuid, _peer_uuid)
        return primary

    def testRequestIdentificationDoesNotKnowPrimary(self):
        self.app.primary = False
        self.app.primary_master_node = None
        self.assertEqual(self._requestIdentification(), None)

    def testRequestIdentificationKnowsPrimary(self):
        self.app.primary = False
        primary = (self.local_ip, 3000)
        self.app.primary_master_node = Mock({
            'getAddress': primary,
        })
        self.assertEqual(self._requestIdentification(), primary)

    def testRequestIdentificationIsPrimary(self):
        self.app.primary = True
        primary = self.app.server
        self.app.primary_master_node = Mock({
            'getAddress': primary,
        })
        self.assertEqual(self._requestIdentification(), primary)

    def test_reelectPrimary(self):
        """Asking to re-elect the primary raises ElectionFailure to restart
        the election."""
        node, conn = self.identifyToMasterNode()
        self.assertRaises(ElectionFailure, self.election.reelectPrimary, conn)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testMasterApp.py 0000664 0000000 0000000 00000010447 12601037530 0027466 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from .. import NeoUnitTestBase
from neo.master.app import Application
class MasterAppTests(NeoUnitTestBase):
    """Unit tests for the master Application itself (node information
    broadcasting and storage-readiness bookkeeping)."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        # create an application object
        config = self.getMasterConfiguration()
        self.app = Application(config)
        self.app.pt.clear()

    def _tearDown(self, success):
        self.app.close()
        NeoUnitTestBase._tearDown(self, success)

    def test_06_broadcastNodeInformation(self):
        """Node information must only be broadcast to the node types that
        care about it, and never to non-running nodes."""
        # define some nodes to which data will be sent
        master_uuid = self.getMasterUUID()
        master = self.app.nm.createMaster(uuid=master_uuid)
        storage_uuid = self.getStorageUUID()
        storage = self.app.nm.createStorage(uuid=storage_uuid)
        client_uuid = self.getClientUUID()
        client = self.app.nm.createClient(uuid=client_uuid)
        # create connections and patch them
        master_conn = self.getFakeConnection()
        storage_conn = self.getFakeConnection()
        client_conn = self.getFakeConnection()
        master.setConnection(master_conn)
        storage.setConnection(storage_conn)
        client.setConnection(client_conn)
        master.setRunning()
        client.setRunning()
        storage.setRunning()
        self.app.nm.add(storage)
        self.app.nm.add(client)
        # no address defined, not sent to client nodes
        c_node = self.app.nm.createClient(uuid=self.getClientUUID())
        self.app.broadcastNodesInformation([c_node])
        # check connections
        self.checkNoPacketSent(client_conn)
        self.checkNoPacketSent(master_conn)
        self.checkNotifyNodeInformation(storage_conn)
        # address defined and client type
        # NOTE(review): s_node is created but [c_node] is broadcast again
        # below — looks like the intent was to broadcast s_node; verify.
        s_node = self.app.nm.createClient(
            uuid=self.getClientUUID(),
            address=("127.1.0.1", 3361)
        )
        self.app.broadcastNodesInformation([c_node])
        # check connections
        self.checkNoPacketSent(client_conn)
        self.checkNoPacketSent(master_conn)
        self.checkNotifyNodeInformation(storage_conn)
        # address defined and storage type
        s_node = self.app.nm.createStorage(
            uuid=self.getStorageUUID(),
            address=("127.0.0.1", 1351)
        )
        self.app.broadcastNodesInformation([s_node])
        # check connections: storage info is also sent to the client
        self.checkNotifyNodeInformation(client_conn)
        self.checkNoPacketSent(master_conn)
        self.checkNotifyNodeInformation(storage_conn)
        # node not running, don't send information to it
        client.setPending()
        self.app.broadcastNodesInformation([s_node])
        # check connections
        self.assertFalse(client_conn.mockGetNamedCalls('notify'))
        self.checkNoPacketSent(master_conn)
        self.checkNotifyNodeInformation(storage_conn)

    def test_storageReadinessAPI(self):
        """setStorageReady/setStorageNotReady/isStorageReady must track each
        UUID independently."""
        uuid_1 = self.getStorageUUID()
        uuid_2 = self.getStorageUUID()
        self.assertFalse(self.app.isStorageReady(uuid_1))
        self.assertFalse(self.app.isStorageReady(uuid_2))
        # Must not raise, nor change readiness
        self.app.setStorageNotReady(uuid_1)
        self.assertFalse(self.app.isStorageReady(uuid_1))
        self.assertFalse(self.app.isStorageReady(uuid_2))
        # Mark as ready, only one must change
        self.app.setStorageReady(uuid_1)
        self.assertTrue(self.app.isStorageReady(uuid_1))
        self.assertFalse(self.app.isStorageReady(uuid_2))
        self.app.setStorageReady(uuid_2)
        # Mark not ready, only one must change
        self.app.setStorageNotReady(uuid_1)
        self.assertFalse(self.app.isStorageReady(uuid_1))
        self.assertTrue(self.app.isStorageReady(uuid_2))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testMasterPT.py 0000664 0000000 0000000 00000027340 12601037530 0027271 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from collections import defaultdict
from mock import Mock
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeStates, CellStates
from neo.lib.pt import PartitionTableException
from neo.master.pt import PartitionTable
from neo.lib.node import StorageNode
class MasterPartitionTableTests(NeoUnitTestBase):
    """Unit tests for the master-side partition table (neo.master.pt).

    The tests drive a PartitionTable through its life cycle — creation,
    make(), outdate(), dropNodeList(), tweak() — using StorageNode
    instances backed by Mock connections.
    """

    def test_02_PartitionTable_creation(self):
        # A freshly built table has the requested geometry but no cell:
        # it is neither filled nor operational, and make() with an empty
        # node list must fail.
        num_partitions = 5
        num_replicas = 3
        pt = PartitionTable(num_partitions, num_replicas)
        self.assertEqual(pt.np, num_partitions)
        self.assertEqual(pt.nr, num_replicas)
        self.assertEqual(pt.num_filled_rows, 0)
        partition_list = pt.partition_list
        self.assertEqual(len(partition_list), num_partitions)
        for x in xrange(num_partitions):
            part = partition_list[x]
            self.assertTrue(isinstance(part, list))
            self.assertEqual(len(part), 0)
        self.assertEqual(len(pt.count_dict), 0)
        # no nodes or cells for now
        self.assertFalse(pt.getNodeSet())
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.getCellList(x)), 0)
            self.assertEqual(len(pt.getCellList(x, True)), 0)
            self.assertEqual(len(pt.getRow(x)), 0)
        self.assertFalse(pt.operational())
        self.assertFalse(pt.filled())
        # building a table from an empty node list is an error
        self.assertRaises(RuntimeError, pt.make, [])
        self.assertFalse(pt.operational())
        self.assertFalse(pt.filled())

    def test_13_outdate(self):
        # outdate() must turn UP_TO_DATE cells of non-running nodes into
        # OUT_OF_DATE, while leaving already-outdated cells and cells of
        # running nodes untouched.
        # create nodes
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        uuid2 = self.getStorageUUID()
        server2 = ("127.0.0.2", 19002)
        sn2 = StorageNode(Mock(), server2, uuid2)
        uuid3 = self.getStorageUUID()
        server3 = ("127.0.0.3", 19003)
        sn3 = StorageNode(Mock(), server3, uuid3)
        uuid4 = self.getStorageUUID()
        server4 = ("127.0.0.4", 19004)
        sn4 = StorageNode(Mock(), server4, uuid4)
        uuid5 = self.getStorageUUID()
        server5 = ("127.0.0.5", 19005)
        sn5 = StorageNode(Mock(), server5, uuid5)
        # create partition table
        num_partitions = 5
        num_replicas = 3
        pt = PartitionTable(num_partitions, num_replicas)
        # one partition per node; sn1 is already outdated, sn2..sn4 are
        # up-to-date but in various non-running node states, sn5 stays
        # running and up-to-date
        pt.setCell(0, sn1, CellStates.OUT_OF_DATE)
        sn1.setState(NodeStates.RUNNING)
        pt.setCell(1, sn2, CellStates.UP_TO_DATE)
        sn2.setState(NodeStates.TEMPORARILY_DOWN)
        pt.setCell(2, sn3, CellStates.UP_TO_DATE)
        sn3.setState(NodeStates.DOWN)
        pt.setCell(3, sn4, CellStates.UP_TO_DATE)
        sn4.setState(NodeStates.BROKEN)
        pt.setCell(4, sn5, CellStates.UP_TO_DATE)
        sn5.setState(NodeStates.RUNNING)
        # outdate nodes
        cells_outdated = pt.outdate()
        self.assertEqual(len(cells_outdated), 3)
        for offset, uuid, state in cells_outdated:
            self.assertTrue(offset in (1, 2, 3))
            self.assertTrue(uuid in (uuid2, uuid3, uuid4))
            self.assertEqual(state, CellStates.OUT_OF_DATE)
        # check each cell
        # part 1, already outdated
        cells = pt.getCellList(0)
        self.assertEqual(len(cells), 1)
        cell = cells[0]
        self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
        # part 2, must be outdated
        cells = pt.getCellList(1)
        self.assertEqual(len(cells), 1)
        cell = cells[0]
        self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
        # part 3, must be outdated
        cells = pt.getCellList(2)
        self.assertEqual(len(cells), 1)
        cell = cells[0]
        self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
        # part 4, already outdated
        cells = pt.getCellList(3)
        self.assertEqual(len(cells), 1)
        cell = cells[0]
        self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
        # part 5, remains running
        cells = pt.getCellList(4)
        self.assertEqual(len(cells), 1)
        cell = cells[0]
        self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)

    def test_15_dropNodeList(self):
        # Dropping a node must discard its cells; a drop that would
        # leave a partition without a usable replica must raise unless
        # forced (second argument True).
        sn = [StorageNode(Mock(), None, i + 1, NodeStates.RUNNING)
              for i in xrange(3)]
        pt = PartitionTable(3, 0)
        pt.setCell(0, sn[0], CellStates.OUT_OF_DATE)
        pt.setCell(1, sn[1], CellStates.FEEDING)
        pt.setCell(1, sn[2], CellStates.OUT_OF_DATE)
        pt.setCell(2, sn[0], CellStates.OUT_OF_DATE)
        pt.setCell(2, sn[1], CellStates.FEEDING)
        pt.setCell(2, sn[2], CellStates.UP_TO_DATE)
        self.assertEqual(sorted(pt.dropNodeList(sn[:1], True)), [
            (0, 1, CellStates.DISCARDED),
            (2, 1, CellStates.DISCARDED),
            (2, 2, CellStates.UP_TO_DATE)])
        self.assertEqual(sorted(pt.dropNodeList(sn[2:], True)), [
            (1, 2, CellStates.UP_TO_DATE),
            (1, 3, CellStates.DISCARDED),
            (2, 2, CellStates.UP_TO_DATE),
            (2, 3, CellStates.DISCARDED)])
        # non-forced drop of the last feeding node must fail
        self.assertRaises(PartitionTableException, pt.dropNodeList, sn[1:2])
        pt.setCell(1, sn[2], CellStates.UP_TO_DATE)
        self.assertEqual(sorted(pt.dropNodeList(sn[1:2])), [
            (1, 2, CellStates.DISCARDED),
            (2, 2, CellStates.DISCARDED)])
        self.assertEqual(self.tweak(pt), [(2, 3, CellStates.FEEDING)])

    def test_16_make(self):
        # make() must only use nodes that are running AND have a UUID.
        num_partitions = 5
        num_replicas = 1
        pt = PartitionTable(num_partitions, num_replicas)
        # add nodes
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1, NodeStates.RUNNING)
        # add not running node
        uuid2 = self.getStorageUUID()
        server2 = ("127.0.0.2", 19001)
        sn2 = StorageNode(Mock(), server2, uuid2)
        sn2.setState(NodeStates.TEMPORARILY_DOWN)
        # add node without uuid
        server3 = ("127.0.0.3", 19001)
        sn3 = StorageNode(Mock(), server3, None, NodeStates.RUNNING)
        # add clear node
        uuid4 = self.getStorageUUID()
        server4 = ("127.0.0.4", 19001)
        sn4 = StorageNode(Mock(), server4, uuid4, NodeStates.RUNNING)
        uuid5 = self.getStorageUUID()
        server5 = ("127.0.0.5", 1900)
        sn5 = StorageNode(Mock(), server5, uuid5, NodeStates.RUNNING)
        # make the table
        pt.make([sn1, sn2, sn3, sn4, sn5])
        # check it's ok, only running nodes and node with uuid
        # must be present
        for x in xrange(num_partitions):
            cells = pt.getCellList(x)
            self.assertEqual(len(cells), 2)
            nodes = [x.getNode() for x in cells]
            for node in nodes:
                self.assertTrue(node in (sn1, sn4, sn5))
                self.assertTrue(node not in (sn2, sn3))
        self.assertTrue(pt.filled())
        self.assertTrue(pt.operational())
        # create a pt with less nodes
        pt.clear()
        self.assertFalse(pt.filled())
        self.assertFalse(pt.operational())
        pt.make([sn1])
        # check it's ok
        for x in xrange(num_partitions):
            cells = pt.getCellList(x)
            self.assertEqual(len(cells), 1)
            nodes = [x.getNode() for x in cells]
            for node in nodes:
                self.assertEqual(node, sn1)
        self.assertTrue(pt.filled())
        self.assertTrue(pt.operational())

    def _pt_states(self, pt):
        # Serialize the table as one {offset: state-initial} dict per
        # node, sorted, so two tables can be compared independently of
        # node identity. Also checks offsets are strictly increasing
        # within each node's row list.
        node_dict = defaultdict(list)
        for offset, row in enumerate(pt.partition_list):
            for cell in row:
                state_list = node_dict[cell.getNode()]
                if state_list:
                    self.assertTrue(state_list[-1][0] < offset)
                state_list.append((offset, str(cell.getState())[0]))
        return map(dict, sorted(node_dict.itervalues()))

    def checkPT(self, pt, exclude_empty=False):
        # The current table must be equivalent to a table rebuilt from
        # scratch with the same (optionally non-empty) nodes.
        new_pt = PartitionTable(pt.np, pt.nr)
        new_pt.make(node for node, count in pt.count_dict.iteritems()
                         if count or not exclude_empty)
        self.assertEqual(self._pt_states(pt), self._pt_states(new_pt))

    def update(self, pt, change_list=None):
        # Simulate storage nodes catching up: promote OUT_OF_DATE cells
        # (all of them, or only those listed in change_list) to
        # UP_TO_DATE.
        if change_list is None:
            for offset, row in enumerate(pt.partition_list):
                for cell in list(row):
                    if cell.isOutOfDate():
                        pt.setUpToDate(cell.getNode(), offset)
        else:
            node_dict = {x.getUUID(): x for x in pt.count_dict}
            for offset, uuid, state in change_list:
                if state is CellStates.OUT_OF_DATE:
                    pt.setUpToDate(node_dict[uuid], offset)

    def tweak(self, pt, drop_list=()):
        # tweak() must be idempotent: calling it again immediately must
        # report no further change.
        change_list = pt.tweak(drop_list)
        self.assertFalse(pt.tweak(drop_list))
        return change_list

    def test_17_tweak(self):
        # End-to-end rebalancing scenario: tweak() must produce a table
        # equivalent to one built from scratch (checkPT), through node
        # drops, re-additions and incremental growth.
        sn = [StorageNode(Mock(), None, i + 1, NodeStates.RUNNING)
              for i in xrange(5)]
        pt = PartitionTable(5, 2)
        # part 0
        pt.setCell(0, sn[0], CellStates.DISCARDED)
        pt.setCell(0, sn[1], CellStates.UP_TO_DATE)
        # part 1
        pt.setCell(1, sn[0], CellStates.FEEDING)
        pt.setCell(1, sn[1], CellStates.FEEDING)
        pt.setCell(1, sn[2], CellStates.OUT_OF_DATE)
        # part 2
        pt.setCell(2, sn[0], CellStates.FEEDING)
        pt.setCell(2, sn[1], CellStates.UP_TO_DATE)
        pt.setCell(2, sn[2], CellStates.UP_TO_DATE)
        # part 3
        pt.setCell(3, sn[0], CellStates.UP_TO_DATE)
        pt.setCell(3, sn[1], CellStates.UP_TO_DATE)
        pt.setCell(3, sn[2], CellStates.UP_TO_DATE)
        pt.setCell(3, sn[3], CellStates.UP_TO_DATE)
        # part 4
        pt.setCell(4, sn[0], CellStates.UP_TO_DATE)
        pt.setCell(4, sn[4], CellStates.UP_TO_DATE)
        count_dict = defaultdict(int)
        change_list = self.tweak(pt)
        for offset, uuid, state in change_list:
            count_dict[state] += 1
        self.assertEqual(count_dict, {CellStates.DISCARDED: 3,
                                      CellStates.OUT_OF_DATE: 5,
                                      CellStates.UP_TO_DATE: 3})
        self.update(pt, change_list)
        self.checkPT(pt)
        # dropping 3 of 5 nodes at once must fail unless forced
        self.assertRaises(PartitionTableException, pt.dropNodeList, sn[1:4])
        self.assertEqual(6, len(pt.dropNodeList(sn[1:3], True)))
        self.assertEqual(3, len(pt.dropNodeList([sn[1]])))
        pt.addNodeList([sn[1]])
        change_list = self.tweak(pt)
        self.assertEqual(3, len(change_list))
        self.update(pt, change_list)
        self.checkPT(pt)
        # grow tables of various geometries one node at a time, with and
        # without applying the change list returned by tweak()
        for np, i in (12, 0), (12, 1), (13, 2):
            pt = PartitionTable(np, i)
            i += 1
            pt.make(sn[:i])
            for n in sn[i:i+3]:
                self.assertEqual([n], pt.addNodeList([n]))
                self.update(pt, self.tweak(pt))
                self.checkPT(pt)
            pt.clear()
            pt.make(sn[:i])
            for n in sn[i:i+3]:
                self.assertEqual([n], pt.addNodeList([n]))
            self.tweak(pt)
            self.update(pt)
            self.checkPT(pt)
        # tweak with an explicit drop list; checkPT must then exclude
        # nodes left without any cell
        pt = PartitionTable(7, 0)
        pt.make(sn[:1])
        pt.addNodeList(sn[1:3])
        self.update(pt, self.tweak(pt, sn[:1]))
        self.checkPT(pt, True)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testRecovery.py 0000664 0000000 0000000 00000012771 12601037530 0027372 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes, NodeStates, CellStates
from neo.master.recovery import RecoveryManager
from neo.master.app import Application
class MasterRecoveryTests(NeoUnitTestBase):
    """Unit tests for the master recovery phase (RecoveryManager)."""

    def setUp(self):
        # Build a master Application and a RecoveryManager around it,
        # plus the addresses used to simulate peer nodes.
        NeoUnitTestBase.setUp(self)
        # create an application object
        config = self.getMasterConfiguration()
        self.app = Application(config)
        self.app.pt.clear()
        self.recovery = RecoveryManager(self.app)
        self.app.unconnected_master_node_set = set()
        self.app.negotiating_master_node_set = set()
        for node in self.app.nm.getMasterList():
            self.app.unconnected_master_node_set.add(node.getAddress())
            node.setState(NodeStates.RUNNING)
        # define some variable to simulate client and storage node
        self.client_port = 11022
        self.storage_port = 10021
        self.master_port = 10011
        self.master_address = ('127.0.0.1', self.master_port)
        self.storage_address = ('127.0.0.1', self.storage_port)

    def _tearDown(self, success):
        # Release application resources before the base teardown.
        self.app.close()
        NeoUnitTestBase._tearDown(self, success)

    # Common methods
    def identifyToMasterNode(self, node_type=NodeTypes.STORAGE, ip="127.0.0.1",
                             port=10021):
        """Do first step of identification to MN
        """
        address = (ip, port)
        uuid = self.getNewUUID(node_type)
        self.app.nm.createFromNodeType(node_type, address=address, uuid=uuid,
            state=NodeStates.RUNNING)
        return uuid

    # Tests
    def test_01_connectionClosed(self):
        # Losing a peer connection during recovery must mark the node
        # TEMPORARILY_DOWN.
        uuid = self.identifyToMasterNode(node_type=NodeTypes.MASTER, port=self.master_port)
        conn = self.getFakeConnection(uuid, self.master_address)
        self.assertEqual(self.app.nm.getByAddress(conn.getAddress()).getState(),
                NodeStates.RUNNING)
        self.recovery.connectionClosed(conn)
        self.assertEqual(self.app.nm.getByAddress(conn.getAddress()).getState(),
                NodeStates.TEMPORARILY_DOWN)

    def test_09_answerLastIDs(self):
        # IDs newer than what the master knows must update the last
        # OID/TID and the recovery target ptid.
        recovery = self.recovery
        uuid = self.identifyToMasterNode()
        oid1 = self.getOID(1)
        oid2 = self.getOID(2)
        tid1 = self.getNextTID()
        tid2 = self.getNextTID(tid1)
        ptid1 = self.getPTID(1)
        ptid2 = self.getPTID(2)
        self.app.tm.setLastOID(oid1)
        self.app.tm.setLastTID(tid1)
        self.app.pt.setID(ptid1)
        # send information which are later to what PMN knows, this must update target node
        conn = self.getFakeConnection(uuid, self.storage_port)
        self.assertTrue(ptid2 > self.app.pt.getID())
        self.assertTrue(oid2 > self.app.tm.getLastOID())
        self.assertTrue(tid2 > self.app.tm.getLastTID())
        recovery.answerLastIDs(conn, oid2, tid2, ptid2, None)
        self.assertEqual(oid2, self.app.tm.getLastOID())
        self.assertEqual(tid2, self.app.tm.getLastTID())
        self.assertEqual(ptid2, recovery.target_ptid)

    def test_10_answerPartitionTable(self):
        # Only an answer matching the expected target ptid may load the
        # partition table; a row offset outside the table is a protocol
        # error.
        recovery = self.recovery
        uuid = self.identifyToMasterNode(NodeTypes.MASTER, port=self.master_port)
        # not from target node, ignore
        uuid = self.identifyToMasterNode(NodeTypes.STORAGE, port=self.storage_port)
        conn = self.getFakeConnection(uuid, self.storage_port)
        node = self.app.nm.getByUUID(conn.getUUID())
        offset = 1
        cell_list = [(offset, uuid, CellStates.UP_TO_DATE)]
        cells = self.app.pt.getRow(offset)
        for cell, state in cells:
            self.assertEqual(state, CellStates.OUT_OF_DATE)
        recovery.target_ptid = 2
        node.setPending()
        recovery.answerPartitionTable(conn, 1, cell_list)
        cells = self.app.pt.getRow(offset)
        for cell, state in cells:
            self.assertEqual(state, CellStates.OUT_OF_DATE)
        # from target node, taken into account
        conn = self.getFakeConnection(uuid, self.storage_port)
        offset = 1
        cell_list = [(offset, ((uuid, CellStates.UP_TO_DATE,),),)]
        cells = self.app.pt.getRow(offset)
        for cell, state in cells:
            self.assertEqual(state, CellStates.OUT_OF_DATE)
        node.setPending()
        recovery.answerPartitionTable(conn, None, cell_list)
        cells = self.app.pt.getRow(offset)
        for cell, state in cells:
            self.assertEqual(state, CellStates.UP_TO_DATE)
        # give a bad offset, must send error
        self.recovery.target_uuid = uuid
        conn = self.getFakeConnection(uuid, self.storage_port)
        offset = 1000000
        self.assertFalse(self.app.pt.hasOffset(offset))
        cell_list = [(offset, ((uuid, NodeStates.DOWN,),),)]
        node.setPending()
        self.checkProtocolErrorRaised(recovery.answerPartitionTable, conn,
            2, cell_list)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testStorageHandler.py 0000664 0000000 0000000 00000025170 12601037530 0030473 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes, NodeStates, Packets
from neo.master.handlers.storage import StorageServiceHandler
from neo.master.handlers.client import ClientServiceHandler
from neo.master.app import Application
from neo.lib.exception import OperationFailure
class MasterStorageHandlerTests(NeoUnitTestBase):
    """Unit tests for the master's StorageServiceHandler.

    Fake client/storage nodes are registered through the node manager
    and exchanged packets are checked on the fake connections.
    """

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        # create an application object
        config = self.getMasterConfiguration(master_number=1, replicas=1)
        self.app = Application(config)
        # replace the real event manager by a Mock so no I/O happens
        self.app.em.close()
        self.app.pt.clear()
        self.app.em = Mock()
        self.service = StorageServiceHandler(self.app)
        self.client_handler = ClientServiceHandler(self.app)
        # define some variable to simulate client and storage node
        self.client_port = 11022
        self.storage_port = 10021
        self.master_port = 10010
        self.master_address = ('127.0.0.1', self.master_port)
        self.client_address = ('127.0.0.1', self.client_port)
        self.storage_address = ('127.0.0.1', self.storage_port)

    def _allocatePort(self):
        # Hand out a fresh fake port on each call (1001, 1002, ...).
        self.port = getattr(self, 'port', 1000) + 1
        return self.port

    def _getClient(self):
        # Register a fake client node; returns (node, connection).
        return self.identifyToMasterNode(node_type=NodeTypes.CLIENT,
            ip='127.0.0.1', port=self._allocatePort())

    def _getStorage(self):
        # Register a fake storage node; returns (node, connection).
        return self.identifyToMasterNode(node_type=NodeTypes.STORAGE,
            ip='127.0.0.1', port=self._allocatePort())

    def identifyToMasterNode(self, node_type=NodeTypes.STORAGE, ip="127.0.0.1",
                             port=10021):
        """Do first step of identification to MN
        """
        nm = self.app.nm
        uuid = self.getNewUUID(node_type)
        node = nm.createFromNodeType(node_type, address=(ip, port),
            uuid=uuid)
        conn = self.getFakeConnection(node.getUUID(), node.getAddress())
        node.setConnection(conn)
        return (node, conn)

    def test_answerInformationLocked_1(self):
        """
        Master must refuse to lock if the TID is greater than the last TID
        """
        tid1 = self.getNextTID()
        tid2 = self.getNextTID(tid1)
        self.app.tm.setLastTID(tid1)
        self.assertTrue(tid1 < tid2)
        node, conn = self.identifyToMasterNode()
        self.checkProtocolErrorRaised(self.service.answerInformationLocked,
            conn, tid2)
        self.checkNoPacketSent(conn)

    def test_answerInformationLocked_2(self):
        """
        Master must:
        - lock each storage
        - notify the client
        - invalidate other clients
        - unlock storages
        """
        # one client and two storages required
        client_1, client_conn_1 = self._getClient()
        client_2, client_conn_2 = self._getClient()
        storage_1, storage_conn_1 = self._getStorage()
        storage_2, storage_conn_2 = self._getStorage()
        uuid_list = storage_1.getUUID(), storage_2.getUUID()
        oid_list = self.getOID(), self.getOID()
        msg_id = 1
        # register a transaction
        ttid = self.app.tm.begin(client_1)
        tid = self.app.tm.prepare(ttid, 1, oid_list, uuid_list,
            msg_id)
        self.assertTrue(ttid in self.app.tm)
        # the first storage acknowledge the lock
        self.service.answerInformationLocked(storage_conn_1, ttid)
        self.checkNoPacketSent(client_conn_1)
        self.checkNoPacketSent(client_conn_2)
        self.checkNoPacketSent(storage_conn_1)
        self.checkNoPacketSent(storage_conn_2)
        # then the second
        self.service.answerInformationLocked(storage_conn_2, ttid)
        self.checkAnswerTransactionFinished(client_conn_1)
        self.checkInvalidateObjects(client_conn_2)
        self.checkNotifyUnlockInformation(storage_conn_1)
        self.checkNotifyUnlockInformation(storage_conn_2)

    def test_12_askLastIDs(self):
        # askLastIDs must answer the current last OID, TID and ptid.
        service = self.service
        node, conn = self.identifyToMasterNode()
        # give a uuid
        conn = self.getFakeConnection(node.getUUID(), self.storage_address)
        ptid = self.app.pt.getID()
        oid = self.getOID(1)
        tid = self.getNextTID()
        self.app.tm.setLastOID(oid)
        self.app.tm.setLastTID(tid)
        service.askLastIDs(conn)
        packet = self.checkAnswerLastIDs(conn)
        loid, ltid, lptid, backup_tid = packet.decode()
        self.assertEqual(loid, oid)
        self.assertEqual(ltid, tid)
        self.assertEqual(lptid, ptid)
        self.assertEqual(backup_tid, None)

    def test_13_askUnfinishedTransactions(self):
        # askUnfinishedTransactions must list prepared-but-unfinished
        # transactions (empty at first, then one after a prepare).
        service = self.service
        node, conn = self.identifyToMasterNode()
        # give a uuid
        service.askUnfinishedTransactions(conn)
        packet = self.checkAnswerUnfinishedTransactions(conn)
        max_tid, tid_list = packet.decode()
        self.assertEqual(tid_list, [])
        # create some transaction
        node, conn = self.identifyToMasterNode(node_type=NodeTypes.CLIENT,
            port=self.client_port)
        ttid = self.app.tm.begin(node)
        self.app.tm.prepare(ttid, 1,
            [self.getOID(1)], [node.getUUID()], 1)
        conn = self.getFakeConnection(node.getUUID(), self.storage_address)
        service.askUnfinishedTransactions(conn)
        max_tid, tid_list = self.checkAnswerUnfinishedTransactions(conn, decode=True)
        self.assertEqual(len(tid_list), 1)

    def test_connectionClosed(self):
        # Closing a storage connection outdates its cells (new ptid);
        # losing the last storage makes the cluster non-operational and
        # raises OperationFailure without changing the ptid.
        method = self.service.connectionClosed
        state = NodeStates.TEMPORARILY_DOWN
        # define two nodes
        node1, conn1 = self.identifyToMasterNode()
        node2, conn2 = self.identifyToMasterNode(port=10022)
        node1.setRunning()
        node2.setRunning()
        self.assertEqual(node1.getState(), NodeStates.RUNNING)
        self.assertEqual(node2.getState(), NodeStates.RUNNING)
        # filled the pt
        self.app.pt.make(self.app.nm.getStorageList())
        self.assertTrue(self.app.pt.filled())
        self.assertTrue(self.app.pt.operational())
        # drop one node
        lptid = self.app.pt.getID()
        method(conn1)
        self.assertEqual(node1.getState(), state)
        self.assertTrue(lptid < self.app.pt.getID())
        # drop the second, no storage node left
        lptid = self.app.pt.getID()
        self.assertEqual(node2.getState(), NodeStates.RUNNING)
        self.assertRaises(OperationFailure, method, conn2)
        self.assertEqual(node2.getState(), state)
        self.assertEqual(lptid, self.app.pt.getID())

    def test_nodeLostAfterAskLockInformation(self):
        # When a storage dies, transactions it was the last missing
        # locker of must finish; others keep waiting or are unaffected.
        # 2 storage nodes, one will die
        node1, conn1 = self._getStorage()
        node2, conn2 = self._getStorage()
        # client nodes, to distinguish answers for the sample transactions
        client1, cconn1 = self._getClient()
        client2, cconn2 = self._getClient()
        client3, cconn3 = self._getClient()
        oid_list = [self.getOID(), ]
        # Some shortcuts to simplify test code
        self.app.pt = Mock({'operational': True})
        # Register some transactions
        tm = self.app.tm
        # Transaction 1: 2 storage nodes involved, one will die and the other
        # already answered node lock
        msg_id_1 = 1
        ttid1 = tm.begin(client1)
        tid1 = tm.prepare(ttid1, 1, oid_list,
            [node1.getUUID(), node2.getUUID()], msg_id_1)
        tm.lock(ttid1, node2.getUUID())
        # storage 1 request a notification at commit
        tm.registerForNotification(node1.getUUID())
        self.checkNoPacketSent(cconn1)
        # Storage 1 dies
        node1.setTemporarilyDown()
        self.service.nodeLost(conn1, node1)
        # T1: last locking node lost, client receives AnswerTransactionFinished
        self.checkAnswerTransactionFinished(cconn1)
        self.checkNotifyTransactionFinished(conn1)
        self.checkNotifyUnlockInformation(conn2)
        # ...and notifications are sent to other clients
        self.checkInvalidateObjects(cconn2)
        self.checkInvalidateObjects(cconn3)
        # Transaction 2: 2 storage nodes involved, one will die
        msg_id_2 = 2
        ttid2 = tm.begin(node1)
        tid2 = tm.prepare(ttid2, 1, oid_list,
            [node1.getUUID(), node2.getUUID()], msg_id_2)
        # T2: pending locking answer, client keeps waiting
        self.checkNoPacketSent(cconn2, check_notify=False)
        tm.remove(node1.getUUID(), ttid2)
        # Transaction 3: 1 storage node involved, which won't die
        msg_id_3 = 3
        ttid3 = tm.begin(node1)
        tid3 = tm.prepare(ttid3, 1, oid_list,
            [node2.getUUID(), ], msg_id_3)
        # T3: action not significant to this transaction, so no response
        self.checkNoPacketSent(cconn3, check_notify=False)
        tm.remove(node1.getUUID(), ttid3)

    def test_answerPack(self):
        # Note: incoming status has no meaning here, so it's left to False.
        node1, conn1 = self._getStorage()
        node2, conn2 = self._getStorage()
        self.app.packing = None
        # Does nothing
        self.service.answerPack(None, False)
        client_conn = Mock({
            'getPeerId': 512,
        })
        client_peer_id = 42
        # packing = (client connection, client msg id, pending storages)
        self.app.packing = (client_conn, client_peer_id,
            {conn1.getUUID(), conn2.getUUID()})
        self.service.answerPack(conn1, False)
        self.checkNoPacketSent(client_conn)
        self.assertEqual(self.app.packing[2], {conn2.getUUID()})
        self.service.answerPack(conn2, False)
        status = self.checkAnswerPacket(client_conn, Packets.AnswerPack,
            decode=True)[0]
        # TODO: verify packet peer id
        self.assertTrue(status)
        self.assertEqual(self.app.packing, None)

    def test_notifyReady(self):
        # notifyReady must flag the sending storage as ready.
        node, conn = self._getStorage()
        uuid = node.getUUID()
        self.assertFalse(self.app.isStorageReady(uuid))
        self.service.notifyReady(conn)
        self.assertTrue(self.app.isStorageReady(uuid))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testTransactions.py 0000664 0000000 0000000 00000024556 12601037530 0030250 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from struct import pack
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes
from neo.lib.util import packTID, unpackTID, addTID
from neo.master.transactions import Transaction, TransactionManager
class testTransactionManager(NeoUnitTestBase):
    """Unit tests for neo.master.transactions.

    Covers the Transaction object, the TransactionManager 2PC
    bookkeeping (begin/prepare/lock/remove/abortFor/forget) and the
    TID packing helpers.
    """

    def makeTID(self, i):
        # Build a dummy 8-byte TID from an integer.
        return pack('!Q', i)

    def makeOID(self, i):
        # Build a dummy 8-byte OID from an integer.
        return pack('!Q', i)

    def makeNode(self, node_type):
        # Return a (uuid, fake node) pair; __hash__ is set so the fake
        # node can be used as a dict key.
        uuid = self.getNewUUID(node_type)
        node = Mock({'getUUID': uuid, '__hash__': uuid, '__repr__': 'FakeNode'})
        return uuid, node

    def testTransaction(self):
        # A Transaction reports locked only once every involved storage
        # has acknowledged its lock.
        # test data
        node = Mock({'__repr__': 'Node'})
        tid = self.makeTID(1)
        ttid = self.makeTID(2)
        oid_list = (oid1, oid2) = [self.makeOID(1), self.makeOID(2)]
        uuid_list = (uuid1, uuid2) = [self.getStorageUUID(),
                                      self.getStorageUUID()]
        msg_id = 1
        # create transaction object
        txn = Transaction(node, ttid)
        txn.prepare(tid, oid_list, uuid_list, msg_id)
        self.assertEqual(txn.getUUIDList(), uuid_list)
        self.assertEqual(txn.getOIDList(), oid_list)
        # lock nodes one by one; lock() returns True on the last one
        self.assertFalse(txn.lock(uuid1))
        self.assertTrue(txn.lock(uuid2))
        # check that repr() works
        repr(txn)

    def testManager(self):
        # Full 2PC round trip: begin, prepare, lock on every storage
        # (which fires the on_commit callback), then remove.
        # test data
        node = Mock({'__hash__': 1})
        msg_id = 1
        oid_list = (oid1, oid2) = self.makeOID(1), self.makeOID(2)
        uuid_list = uuid1, uuid2 = self.getStorageUUID(), self.getStorageUUID()
        client_uuid = self.getClientUUID()
        # create transaction manager
        callback = Mock()
        txnman = TransactionManager(on_commit=callback)
        self.assertFalse(txnman.hasPending())
        self.assertEqual(txnman.registerForNotification(uuid1), [])
        # begin the transaction
        ttid = txnman.begin(node)
        self.assertTrue(ttid is not None)
        self.assertEqual(len(txnman.registerForNotification(uuid1)), 1)
        self.assertTrue(txnman.hasPending())
        # prepare the transaction
        tid = txnman.prepare(ttid, 1, oid_list, uuid_list, msg_id)
        self.assertTrue(txnman.hasPending())
        self.assertEqual(txnman.registerForNotification(uuid1), [ttid])
        txn = txnman[ttid]
        self.assertEqual(txn.getTID(), tid)
        self.assertEqual(txn.getUUIDList(), list(uuid_list))
        self.assertEqual(txn.getOIDList(), list(oid_list))
        # lock nodes
        txnman.lock(ttid, uuid1)
        self.assertEqual(len(callback.getNamedCalls('__call__')), 0)
        txnman.lock(ttid, uuid2)
        self.assertEqual(len(callback.getNamedCalls('__call__')), 1)
        # transaction finished
        txnman.remove(client_uuid, ttid)
        self.assertEqual(txnman.registerForNotification(uuid1), [])

    def testAbortFor(self):
        # abortFor() must not drop a prepared transaction: it must
        # survive until the end of the 2PC.
        oid_list = [self.makeOID(1), ]
        storage_1_uuid, node1 = self.makeNode(NodeTypes.STORAGE)
        storage_2_uuid, node2 = self.makeNode(NodeTypes.STORAGE)
        client_uuid, client = self.makeNode(NodeTypes.CLIENT)
        txnman = TransactionManager(lambda tid, txn: None)
        # register 4 transactions made by two nodes
        self.assertEqual(txnman.registerForNotification(storage_1_uuid), [])
        ttid1 = txnman.begin(client)
        tid1 = txnman.prepare(ttid1, 1, oid_list, [storage_1_uuid], 1)
        self.assertEqual(txnman.registerForNotification(storage_1_uuid), [ttid1])
        # abort transactions of another node, transaction stays
        txnman.abortFor(node2)
        self.assertEqual(txnman.registerForNotification(storage_1_uuid), [ttid1])
        # abort transactions of requesting node, transaction is not removed
        # because the transaction is prepared and must remains until the end of
        # the 2PC
        txnman.abortFor(node1)
        self.assertEqual(txnman.registerForNotification(storage_1_uuid), [ttid1])
        self.assertTrue(txnman.hasPending())
        # ...and the lock is available
        txnman.begin(client, self.getNextTID())

    def test_getNextOIDList(self):
        # getNextOIDList must fail until a last OID is known, then
        # return consecutive OIDs starting after it.
        txnman = TransactionManager(lambda tid, txn: None)
        # must raise as we don't have one
        self.assertEqual(txnman.getLastOID(), None)
        self.assertRaises(RuntimeError, txnman.getNextOIDList, 1)
        # ask list
        txnman.setLastOID(self.getOID(1))
        oid_list = txnman.getNextOIDList(15)
        self.assertEqual(len(oid_list), 15)
        # begin from 1, so generated oid from 2 to 16
        for i, oid in zip(xrange(len(oid_list)), oid_list):
            self.assertEqual(oid, self.getOID(i+2))

    def test_forget(self):
        # forget(uuid) must report whether the transaction became fully
        # locked because the forgotten storage was the last one missing.
        client1 = Mock({'__hash__': 1})
        client2 = Mock({'__hash__': 2})
        client3 = Mock({'__hash__': 3})
        storage_1_uuid = self.getStorageUUID()
        storage_2_uuid = self.getStorageUUID()
        oid_list = [self.makeOID(1), ]
        client_uuid = self.getClientUUID()
        tm = TransactionManager(lambda tid, txn: None)
        # Transaction 1: 2 storage nodes involved, one will die and the other
        # already answered node lock
        msg_id_1 = 1
        ttid1 = tm.begin(client1)
        tid1 = tm.prepare(ttid1, 1, oid_list,
            [storage_1_uuid, storage_2_uuid], msg_id_1)
        tm.lock(ttid1, storage_2_uuid)
        t1 = tm[ttid1]
        self.assertFalse(t1.locked())
        # Storage 1 dies:
        # t1 is over
        self.assertTrue(t1.forget(storage_1_uuid))
        self.assertEqual(t1.getUUIDList(), [storage_2_uuid])
        tm.remove(client_uuid, tid1)
        # Transaction 2: 2 storage nodes involved, one will die
        msg_id_2 = 2
        ttid2 = tm.begin(client2)
        tid2 = tm.prepare(ttid2, 1, oid_list,
            [storage_1_uuid, storage_2_uuid], msg_id_2)
        t2 = tm[ttid2]
        self.assertFalse(t2.locked())
        # Storage 1 dies:
        # t2 still waits for storage 2
        self.assertFalse(t2.forget(storage_1_uuid))
        self.assertEqual(t2.getUUIDList(), [storage_2_uuid])
        self.assertTrue(t2.lock(storage_2_uuid))
        tm.remove(client_uuid, tid2)
        # Transaction 3: 1 storage node involved, which won't die
        msg_id_3 = 3
        ttid3 = tm.begin(client3)
        tid3 = tm.prepare(ttid3, 1, oid_list, [storage_2_uuid, ],
            msg_id_3)
        t3 = tm[ttid3]
        self.assertFalse(t3.locked())
        # Storage 1 dies:
        # t3 doesn't care
        self.assertFalse(t3.forget(storage_1_uuid))
        self.assertEqual(t3.getUUIDList(), [storage_2_uuid])
        self.assertTrue(t3.lock(storage_2_uuid))
        tm.remove(client_uuid, tid3)

    def testTIDUtils(self):
        """
        Tests packTID/unpackTID/addTID.
        """
        min_tid = pack('!LL', 0, 0)
        min_unpacked_tid = ((1900, 1, 1, 0, 0), 0)
        max_tid = pack('!LL', 2**32 - 1, 2 ** 32 - 1)
        # ((((9917 - 1900) * 12 + (10 - 1)) * 31 + (14 - 1)) * 24 + 4) * 60 +
        # 15 == 2**32 - 1
        max_unpacked_tid = ((9917, 10, 14, 4, 15), 2**32 - 1)
        self.assertEqual(unpackTID(min_tid), min_unpacked_tid)
        self.assertEqual(unpackTID(max_tid), max_unpacked_tid)
        self.assertEqual(packTID(*min_unpacked_tid), min_tid)
        self.assertEqual(packTID(*max_unpacked_tid), max_tid)
        self.assertEqual(addTID(min_tid, 1), pack('!LL', 0, 1))
        self.assertEqual(addTID(pack('!LL', 0, 2**32 - 1), 1),
            pack('!LL', 1, 0))
        self.assertEqual(addTID(pack('!LL', 0, 2**32 - 1), 2**32 + 1),
            pack('!LL', 2, 0))
        # Check impossible dates are avoided (2010/11/31 doesn't exist)
        self.assertEqual(
            unpackTID(addTID(packTID((2010, 11, 30, 23, 59), 2**32 - 1), 1)),
            ((2010, 12, 1, 0, 0), 0))

    def testTransactionLock(self):
        """
        Transaction lock is present to ensure invalidation TIDs are sent in
        strictly increasing order.
        Note: this implementation might change later, to allow more parallelism.
        """
        client_uuid, client = self.makeNode(NodeTypes.CLIENT)
        tm = TransactionManager(lambda tid, txn: None)
        # With a requested TID, lock spans from begin to remove
        ttid1 = self.getNextTID()
        ttid2 = self.getNextTID()
        tid1 = tm.begin(client, ttid1)
        self.assertEqual(tid1, ttid1)
        tm.remove(client_uuid, tid1)
        # Without a requested TID, lock spans from prepare to remove only
        ttid3 = tm.begin(client)
        ttid4 = tm.begin(client) # Doesn't raise
        node = Mock({'getUUID': client_uuid, '__hash__': 0})
        tid4 = tm.prepare(ttid4, 1, [], [], 0)
        tm.remove(client_uuid, tid4)
        tm.prepare(ttid3, 1, [], [], 0)

    def testClientDisconectsAfterBegin(self):
        # A transaction begun with a requested TID must disappear when
        # its client disconnects before prepare.
        client_uuid1, node1 = self.makeNode(NodeTypes.CLIENT)
        tm = TransactionManager(lambda tid, txn: None)
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        tm.begin(node1, tid1)
        tm.abortFor(node1)
        self.assertTrue(tid1 not in tm)

    def testUnlockPending(self):
        # A later transaction fully locked before an earlier one must
        # only be committed once the earlier one is fully locked too.
        callback = Mock()
        uuid1, node1 = self.makeNode(NodeTypes.CLIENT)
        uuid2, node2 = self.makeNode(NodeTypes.CLIENT)
        storage_uuid = self.getStorageUUID()
        tm = TransactionManager(callback)
        ttid1 = tm.begin(node1)
        ttid2 = tm.begin(node2)
        tid1 = tm.prepare(ttid1, 1, [], [storage_uuid], 0)
        tid2 = tm.prepare(ttid2, 1, [], [storage_uuid], 0)
        tm.lock(ttid2, storage_uuid)
        # txn 2 is still blocked by txn 1
        self.assertEqual(len(callback.getNamedCalls('__call__')), 0)
        tm.lock(ttid1, storage_uuid)
        # both transactions are unlocked when txn 1 is fully locked
        self.assertEqual(len(callback.getNamedCalls('__call__')), 2)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/master/testVerification.py 0000664 0000000 0000000 00000022630 12601037530 0030211 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from struct import pack, unpack
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes, NodeStates
from neo.master.verification import VerificationManager, VerificationFailure
from neo.master.app import Application
class MasterVerificationTests(NeoUnitTestBase):
    """Unit tests for the master's VerificationManager packet handlers."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        # create an application object
        config = self.getMasterConfiguration()
        self.app = Application(config)
        self.app.pt.clear()
        self.verification = VerificationManager(self.app)
        # reset last oid/tid so the tests start from a known state
        self.app.loid = '\0' * 8
        self.app.tm.setLastTID('\0' * 8)
        for node in self.app.nm.getMasterList():
            self.app.unconnected_master_node_set.add(node.getAddress())
            node.setState(NodeStates.RUNNING)
        # define some variables to simulate client and storage nodes
        self.client_port = 11022
        self.storage_port = 10021
        self.master_port = 10011
        self.master_address = ('127.0.0.1', self.master_port)
        self.storage_address = ('127.0.0.1', self.storage_port)

    def _tearDown(self, success):
        # close the application before the base class cleans up
        self.app.close()
        NeoUnitTestBase._tearDown(self, success)

    # Common methods
    def identifyToMasterNode(self, node_type=NodeTypes.STORAGE, ip="127.0.0.1",
            port=10021):
        """Do the first step of identification to the master node:
        register a node of *node_type* and return its new UUID.
        """
        uuid = self.getNewUUID(node_type)
        self.app.nm.createFromNodeType(
            node_type,
            address=(ip, port),
            uuid=uuid,
        )
        return uuid

    # Tests
    def test_01_connectionClosed(self):
        # test a storage: must raise as the cluster is no longer operational
        uuid = self.identifyToMasterNode()
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(self.app.nm.getByAddress(conn.getAddress()).getState(),
            NodeStates.UNKNOWN)
        self.assertRaises(VerificationFailure, self.verification.connectionClosed, conn)
        # the node must have been flagged temporarily down
        self.assertEqual(self.app.nm.getByAddress(conn.getAddress()).getState(),
            NodeStates.TEMPORARILY_DOWN)

    def _test_09_answerLastIDs(self):
        # XXX: test disabled, should be an unexpected packet
        verification = self.verification
        uuid = self.identifyToMasterNode()
        loid = self.app.loid
        ltid = self.app.tm.getLastTID()
        lptid = '\0' * 8
        # send IDs that are later than what the PMN knows; this must raise
        conn = self.getFakeConnection(uuid, self.storage_address)
        node_list = []
        new_ptid = unpack('!Q', lptid)[0]
        new_ptid = pack('!Q', new_ptid + 1)
        oid = unpack('!Q', loid)[0]
        new_oid = pack('!Q', oid + 1)
        upper, lower = unpack('!LL', ltid)
        new_tid = pack('!LL', upper, lower + 10)
        self.assertTrue(new_ptid > self.app.pt.getID())
        self.assertTrue(new_oid > self.app.loid)
        self.assertTrue(new_tid > self.app.tm.getLastTID())
        self.assertRaises(VerificationFailure, verification.answerLastIDs, conn, new_oid, new_tid, new_ptid)
        # the application state must not have been updated by the failure
        self.assertNotEqual(new_oid, self.app.loid)
        self.assertNotEqual(new_tid, self.app.tm.getLastTID())
        self.assertNotEqual(new_ptid, self.app.pt.getID())

    def test_11_answerUnfinishedTransactions(self):
        verification = self.verification
        uuid = self.identifyToMasterNode()
        # do nothing: the node is not in the set of nodes being asked
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.assertEqual(len(self.verification._tid_set), 0)
        new_tid = self.getNextTID()
        verification.answerUnfinishedTransactions(conn, new_tid, [new_tid])
        self.assertEqual(len(self.verification._tid_set), 0)
        # update the tid set once the node is expected to answer
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.verification._uuid_set.add(uuid)
        self.assertEqual(len(self.verification._tid_set), 0)
        new_tid = self.getNextTID(new_tid)
        verification.answerUnfinishedTransactions(conn, new_tid, [new_tid])
        self.assertTrue(uuid not in self.verification._uuid_set)
        self.assertEqual(len(self.verification._tid_set), 1)
        self.assertTrue(new_tid in self.verification._tid_set)

    def test_12_answerTransactionInformation(self):
        verification = self.verification
        uuid = self.identifyToMasterNode()
        # do nothing, as the unfinished oid set is None
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._uuid_set.add(uuid)
        self.verification._oid_set = None
        new_tid = self.getNextTID()
        new_oid = self.getOID(1)
        verification.answerTransactionInformation(conn, new_tid,
            "user", "desc", "ext", False, [new_oid,])
        self.assertEqual(self.verification._oid_set, None)
        # do nothing as the node is not in the asking set
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._oid_set = set()
        self.assertEqual(len(self.verification._oid_set), 0)
        verification.answerTransactionInformation(conn, new_tid,
            "user", "desc", "ext", False, [new_oid,])
        self.assertEqual(len(self.verification._oid_set), 0)
        # do work: the oid is recorded
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._uuid_set.add(uuid)
        self.assertEqual(len(self.verification._oid_set), 0)
        verification.answerTransactionInformation(conn, new_tid,
            "user", "desc", "ext", False, [new_oid,])
        self.assertEqual(len(self.verification._oid_set), 1)
        self.assertTrue(new_oid in self.verification._oid_set)
        # must fail as the answered oid differs from the recorded one
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._uuid_set.add(uuid)
        self.assertEqual(len(self.verification._oid_set), 1)
        new_oid = self.getOID(2)
        self.assertRaises(ValueError, verification.answerTransactionInformation,
            conn, new_tid, "user", "desc", "ext", False, [new_oid,])

    def test_13_tidNotFound(self):
        verification = self.verification
        uuid = self.identifyToMasterNode()
        # do nothing as the node is not in the asking set
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._oid_set = []
        verification.tidNotFound(conn, "msg")
        self.assertNotEqual(self.verification._oid_set, None)
        # do work: the oid set is reset to None
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._uuid_set.add(uuid)
        self.verification._oid_set = []
        verification.tidNotFound(conn, "msg")
        self.assertEqual(self.verification._oid_set, None)

    def test_14_answerObjectPresent(self):
        verification = self.verification
        uuid = self.identifyToMasterNode()
        # do nothing as the node is not in the asking set
        new_tid = self.getNextTID()
        new_oid = self.getOID(1)
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        verification.answerObjectPresent(conn, new_oid, new_tid)
        # do work: the node is removed from the asking set
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._uuid_set.add(uuid)
        verification.answerObjectPresent(conn, new_oid, new_tid)
        self.assertTrue(uuid not in self.verification._uuid_set)

    def test_15_oidNotFound(self):
        verification = self.verification
        uuid = self.identifyToMasterNode()
        # do nothing as the node is not in the asking set
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.app._object_present = True
        self.assertTrue(self.app._object_present)
        verification.oidNotFound(conn, "msg")
        self.assertTrue(self.app._object_present)
        # do work: the object-present flag is cleared
        conn = self.getFakeConnection(uuid, self.storage_address)
        self.assertEqual(len(self.verification._uuid_set), 0)
        self.verification._uuid_set.add(uuid)
        self.assertTrue(self.app._object_present)
        verification.oidNotFound(conn, "msg")
        self.assertFalse(self.app._object_present)
        self.assertTrue(uuid not in self.verification._uuid_set)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/stat_zodb.py 0000775 0000000 0000000 00000013364 12601037530 0025374 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math, random, sys
from cStringIO import StringIO
from ZODB.utils import p64, u64
from ZODB.BaseStorage import TransactionRecord
from ZODB.FileStorage import FileStorage
# Stats of a 43.5 GB production Data.fs
# µ σ
# size of object 6.04237779991 1.55811487853
# # objects / transaction 1.04108991045 0.906703192546
# size of transaction 7.98615420517 1.6624220402
#
# % of new object / transaction: 0.810080409164
# # of transactions: 1541194
# compression ratio: 28.5 % (gzip -6)
# Factory for a DummyZODB parameterized with the statistics measured on the
# production Data.fs described in the comment above.
PROD1 = lambda random=random: DummyZODB(6.04237779991, 1.55811487853,
                                        1.04108991045, 0.906703192546,
                                        0.810080409164, random)
def DummyData(random=random):
    """Return a StringIO of pseudo-random bytes.

    The byte distribution is chosen so the data compresses to about 28.5 %
    with gzip, and the sample is larger than a compressor's dictionary.
    """
    chunks = []
    append = chunks.append
    for _ in xrange(100000):
        append(chr(int(random.gauss(0, .8)) % 256))
    return StringIO(''.join(chunks))
class DummyZODB(object):
    """
    Object size and count of generated transaction follows a log normal
    distribution, where *_mu and *_sigma are their parameters.
    """

    def __init__(self, obj_size_mu, obj_size_sigma,
                       obj_count_mu, obj_count_sigma,
                       new_ratio, random=random):
        # log-normal parameters for per-object size
        self.obj_size_mu = obj_size_mu
        self.obj_size_sigma = obj_size_sigma
        # log-normal parameters for objects per transaction
        self.obj_count_mu = obj_count_mu
        self.obj_count_sigma = obj_count_sigma
        self.random = random
        # probability that a stored object gets a brand new oid
        self.new_ratio = new_ratio
        self.next_oid = 0
        self.err_count = 0
        self.tid = u64('TID\0\0\0\0\0')

    def __call__(self):
        """Yield (packed_oid, size) pairs for one generated transaction."""
        variate = self.random.lognormvariate
        oid_set = set()
        for i in xrange(int(round(variate(self.obj_count_mu,
                self.obj_count_sigma))) or 1):
            if len(oid_set) >= self.next_oid or \
                    self.random.random() < self.new_ratio:
                # allocate a new oid
                oid = self.next_oid
                self.next_oid = oid + 1
            else:
                # reuse an existing oid not already used in this transaction
                while True:
                    oid = self.random.randrange(self.next_oid)
                    if oid not in oid_set:
                        break
            oid_set.add(oid)
            yield p64(oid), int(round(variate(self.obj_size_mu,
                self.obj_size_sigma))) or 1

    def as_storage(self, stop, dummy_data_file=None):
        """Return a read-only, storage-like object that iterates over
        generated dummy transactions.

        stop: either a transaction count (int) or a callable taking the
              current transaction index and returning true to stop.
        dummy_data_file: file-like source of record payload bytes
              (defaults to DummyData(self.random)).
        """
        if dummy_data_file is None:
            dummy_data_file = DummyData(self.random)
        if isinstance(stop, int):
            # turn a count into a predicate on the iteration index
            stop = (lambda x: lambda y: x <= y)(stop)
        class dummy_change(object):
            # one data record of a transaction
            data_txn = None
            version = ''
            def __init__(self, tid, oid, size):
                self.tid = tid
                self.oid = oid
                data = ''
                # read `size` bytes, wrapping around the data file as needed
                while size:
                    d = dummy_data_file.read(size)
                    size -= len(d)
                    data += d
                    if size:
                        dummy_data_file.seek(0)
                self.data = data
        class dummy_transaction(TransactionRecord):
            # note: `transaction` plays the role of `self` here, to keep the
            # outer generator's `self` accessible inside the closure
            def __init__(transaction, *args):
                TransactionRecord.__init__(transaction, *args)
                transaction_size = 0
                transaction.record_list = []
                add_record = transaction.record_list.append
                for x in self():
                    oid, size = x
                    transaction_size += size
                    add_record(dummy_change(transaction.tid, oid, size))
                transaction.size = transaction_size
            def __iter__(transaction):
                return iter(transaction.record_list)
        class dummy_storage(object):
            size = 0
            def iterator(storage, *args):
                args = ' ', '', '', {}
                i = 0
                variate = self.random.lognormvariate
                while not stop(i):
                    # advance the tid by a random positive increment
                    self.tid += max(1, int(variate(10, 3)))
                    t = dummy_transaction(p64(self.tid), *args)
                    storage.size += t.size
                    yield t
                    i += 1
            def getSize(self):
                return self.size
        return dummy_storage()
def lognorm_stat(X):
    """Return the (mu, sigma) parameters of the log-normal distribution
    fitting the sample X (a non-empty sequence of positive numbers).

    mu and sigma are the mean and standard deviation of log(x) over X.
    """
    # A list comprehension instead of map(): on Python 3, map() returns an
    # iterator, which would break len() and the double iteration below.
    Y = [math.log(x) for x in X]
    n = len(Y)
    mu = sum(Y) / n
    s2 = sum(d*d for d in (y - mu for y in Y)) / n
    return mu, math.sqrt(s2)
def stat(*storages):
    """Measure log-normal statistics over the transactions of the given
    ZODB storages.

    Returns a 5-tuple:
      ((mu, sigma) of object sizes,
       (mu, sigma) of object counts per transaction,
       (mu, sigma) of transaction sizes,
       ratio of new objects per stored record,
       number of transactions).
    """
    obj_size_list = []
    obj_count_list = []
    tr_size_list = []
    oid_set = set()
    for storage in storages:
        for transaction in storage.iterator():
            obj_count = tr_size = 0
            for r in transaction:
                # records without data (undo/backpointer) are ignored
                if r.data:
                    obj_count += 1
                    oid = r.oid
                    if oid not in oid_set:
                        oid_set.add(oid)
                    size = len(r.data)
                    tr_size += size
                    obj_size_list.append(size)
            obj_count_list.append(obj_count)
            tr_size_list.append(tr_size)
    # fraction of records that created a previously unseen oid
    new_ratio = float(len(oid_set)) / len(obj_size_list)
    return (lognorm_stat(obj_size_list),
            lognorm_stat(obj_count_list),
            lognorm_stat(tr_size_list),
            new_ratio, len(tr_size_list))
def main():
    """Print the log-normal statistics of the FileStorage paths given on
    the command line, in the same layout as the comment at the top of
    this file."""
    s = stat(*(FileStorage(x, read_only=True) for x in sys.argv[1:]))
    print(u" %-15s σ\n"
        "size of object %-15s %s\n"
        "# objects / transaction %-15s %s\n"
        "size of transaction %-15s %s\n"
        "\n%% of new object / transaction: %s"
        "\n# of transactions: %s"
        % ((u"µ",) + s[0] + s[1] + s[2] + s[3:]))
# Run the statistics tool when executed directly.
if __name__ == "__main__":
    sys.exit(main())
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/ 0000775 0000000 0000000 00000000000 12601037530 0024463 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/__init__.py 0000664 0000000 0000000 00000000000 12601037530 0026562 0 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testClientHandler.py 0000664 0000000 0000000 00000030417 12601037530 0030456 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock, ReturnValues
from collections import deque
from neo.lib.util import makeChecksum
from .. import NeoUnitTestBase
from neo.storage.app import Application
from neo.storage.transactions import ConflictError
from neo.storage.handlers.client import ClientOperationHandler
from neo.lib.protocol import INVALID_PARTITION, INVALID_TID, INVALID_OID
from neo.lib.protocol import Packets, LockState, ZERO_HASH
class StorageClientHandlerTests(NeoUnitTestBase):
    """Tests for the storage node's ClientOperationHandler."""

    def checkHandleUnexpectedPacket(self, _call, _msg_type, _listening=True, **kwargs):
        # helper: check that _call raises on an unexpected packet
        conn = self.getFakeConnection(address=("127.0.0.1", self.master_port),
            is_server=_listening)
        # hook
        self.operation.peerBroken = lambda c: c.peerBrokendCalled()
        self.checkUnexpectedPacketRaised(_call, conn=conn, **kwargs)

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.prepareDatabase(number=1)
        # create an application object
        config = self.getStorageConfiguration(master_number=1)
        self.app = Application(config)
        self.app.transaction_dict = {}
        self.app.store_lock_dict = {}
        self.app.load_lock_dict = {}
        self.app.event_queue = deque()
        self.app.event_queue_dict = {}
        self.app.tm = Mock({'__contains__': True})
        # handler
        self.operation = ClientOperationHandler(self.app)
        # set pmn
        self.master_uuid = self.getMasterUUID()
        pmn = self.app.nm.getMasterList()[0]
        pmn.setUUID(self.master_uuid)
        self.app.primary_master_node = pmn
        self.master_port = 10010

    def _tearDown(self, success):
        self.app.close()
        del self.app
        super(StorageClientHandlerTests, self)._tearDown(success)

    def _getConnection(self, uuid=None):
        # fake client connection
        return self.getFakeConnection(uuid=uuid, address=('127.0.0.1', 1000))

    def _checkTransactionsAborted(self, uuid):
        # check that abortFor() was called exactly once for this client
        calls = self.app.tm.mockGetNamedCalls('abortFor')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(uuid)

    def test_connectionLost(self):
        uuid = self.getClientUUID()
        self.app.nm.createClient(uuid=uuid)
        conn = self._getConnection(uuid=uuid)
        self.operation.connectionClosed(conn)

    def test_18_askTransactionInformation1(self):
        # transaction does not exist
        conn = self._getConnection()
        self.app.dm = Mock({'getNumPartitions': 1})
        self.operation.askTransactionInformation(conn, INVALID_TID)
        self.checkErrorPacket(conn)

    def test_18_askTransactionInformation2(self):
        # answer
        conn = self._getConnection()
        oid_list = [self.getOID(1), self.getOID(2)]
        dm = Mock({ "getTransaction": (oid_list, 'user', 'desc', '', False), })
        self.app.dm = dm
        self.operation.askTransactionInformation(conn, INVALID_TID)
        self.checkAnswerTransactionInformation(conn)

    def test_24_askObject1(self):
        # delayed response: oid is load-locked, request must be queued
        conn = self._getConnection()
        self.app.dm = Mock()
        self.app.tm = Mock({'loadLocked': True})
        self.app.load_lock_dict[INVALID_OID] = object()
        self.assertEqual(len(self.app.event_queue), 0)
        self.operation.askObject(conn, oid=INVALID_OID,
            serial=INVALID_TID, tid=INVALID_TID)
        self.assertEqual(len(self.app.event_queue), 1)
        self.checkNoPacketSent(conn)
        self.assertEqual(len(self.app.dm.mockGetNamedCalls('getObject')), 0)

    def test_24_askObject2(self):
        # invalid serial / tid / packet not found
        self.app.dm = Mock({'getObject': None})
        conn = self._getConnection()
        self.assertEqual(len(self.app.event_queue), 0)
        self.operation.askObject(conn, oid=INVALID_OID,
            serial=INVALID_TID, tid=INVALID_TID)
        calls = self.app.dm.mockGetNamedCalls('getObject')
        self.assertEqual(len(self.app.event_queue), 0)
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(INVALID_OID, INVALID_TID, INVALID_TID)
        self.checkErrorPacket(conn)

    def test_24_askObject3(self):
        # object found => answer
        serial = self.getNextTID()
        next_serial = self.getNextTID()
        oid = self.getOID(1)
        tid = self.getNextTID()
        H = "0" * 20
        self.app.dm = Mock({'getObject': (serial, next_serial, 0, H, '', None)})
        conn = self._getConnection()
        self.assertEqual(len(self.app.event_queue), 0)
        self.operation.askObject(conn, oid=oid, serial=serial, tid=tid)
        self.assertEqual(len(self.app.event_queue), 0)
        self.checkAnswerObject(conn)

    def test_25_askTIDs1(self):
        # invalid offsets => error
        app = self.app
        app.pt = Mock()
        app.dm = Mock()
        conn = self._getConnection()
        self.checkProtocolErrorRaised(self.operation.askTIDs, conn, 1, 1, None)
        self.assertEqual(len(app.pt.mockGetNamedCalls('getCellList')), 0)
        self.assertEqual(len(app.dm.mockGetNamedCalls('getTIDList')), 0)

    def test_25_askTIDs2(self):
        # well case => answer
        conn = self._getConnection()
        self.app.pt = Mock({'getPartitions': 1})
        self.app.dm = Mock({'getTIDList': (INVALID_TID, )})
        self.operation.askTIDs(conn, 1, 2, 1)
        calls = self.app.dm.mockGetNamedCalls('getTIDList')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(1, 1, [1, ])
        self.checkAnswerTids(conn)

    def test_25_askTIDs3(self):
        # invalid partition => answer usable partitions
        conn = self._getConnection()
        cell = Mock({'getUUID':self.app.uuid})
        self.app.dm = Mock({'getTIDList': (INVALID_TID, )})
        self.app.pt = Mock({
            'getCellList': (cell, ),
            'getPartitions': 1,
            'getAssignedPartitionList': [0],
        })
        self.operation.askTIDs(conn, 1, 2, INVALID_PARTITION)
        self.assertEqual(len(self.app.pt.mockGetNamedCalls('getAssignedPartitionList')), 1)
        calls = self.app.dm.mockGetNamedCalls('getTIDList')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(1, 1, [0])
        self.checkAnswerTids(conn)

    def test_26_askObjectHistory1(self):
        # invalid offsets => error
        app = self.app
        app.dm = Mock()
        conn = self._getConnection()
        self.checkProtocolErrorRaised(self.operation.askObjectHistory, conn,
            1, 1, None)
        self.assertEqual(len(app.dm.mockGetNamedCalls('getObjectHistory')), 0)

    def test_26_askObjectHistory2(self):
        oid1, oid2 = self.getOID(1), self.getOID(2)
        # first case: empty history
        conn = self._getConnection()
        self.app.dm = Mock({'getObjectHistory': None})
        self.operation.askObjectHistory(conn, oid1, 1, 2)
        self.checkErrorPacket(conn)
        # second case: not empty history
        conn = self._getConnection()
        serial = self.getNextTID()
        self.app.dm = Mock({'getObjectHistory': [(serial, 0, ), ]})
        self.operation.askObjectHistory(conn, oid2, 1, 2)
        self.checkAnswerObjectHistory(conn)

    def test_askStoreTransaction(self):
        conn = self._getConnection(uuid=self.getClientUUID())
        tid = self.getNextTID()
        user = 'USER'
        desc = 'DESC'
        ext = 'EXT'
        oid_list = (self.getOID(1), self.getOID(2))
        self.operation.askStoreTransaction(conn, tid, user, desc, ext, oid_list)
        calls = self.app.tm.mockGetNamedCalls('storeTransaction')
        self.assertEqual(len(calls), 1)
        self.checkAnswerStoreTransaction(conn)

    def _getObject(self):
        # build an (oid, serial, compression, checksum, data) tuple
        oid = self.getOID(0)
        serial = self.getNextTID()
        data = 'DATA'
        return (oid, serial, 1, makeChecksum(data), data)

    def _checkStoreObjectCalled(self, *args):
        # check that storeObject() was called once with the given arguments
        calls = self.app.tm.mockGetNamedCalls('storeObject')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(*args)

    def test_askStoreObject1(self):
        # no conflict => answer
        conn = self._getConnection(uuid=self.getClientUUID())
        tid = self.getNextTID()
        oid, serial, comp, checksum, data = self._getObject()
        self.operation.askStoreObject(conn, oid, serial, comp, checksum,
            data, None, tid, False)
        self._checkStoreObjectCalled(tid, serial, oid, comp,
            checksum, data, None, False)
        pconflicting, poid, pserial = self.checkAnswerStoreObject(conn,
            decode=True)
        self.assertEqual(pconflicting, 0)
        self.assertEqual(poid, oid)
        self.assertEqual(pserial, serial)

    def test_askStoreObjectWithDataTID(self):
        # same as test_askStoreObject1, but with a non-None data_tid value
        conn = self._getConnection(uuid=self.getClientUUID())
        tid = self.getNextTID()
        oid, serial, comp, checksum, data = self._getObject()
        data_tid = self.getNextTID()
        self.operation.askStoreObject(conn, oid, serial, comp, ZERO_HASH,
            '', data_tid, tid, False)
        self._checkStoreObjectCalled(tid, serial, oid, comp,
            None, None, data_tid, False)
        pconflicting, poid, pserial = self.checkAnswerStoreObject(conn,
            decode=True)
        self.assertEqual(pconflicting, 0)
        self.assertEqual(poid, oid)
        self.assertEqual(pserial, serial)

    def test_askStoreObject2(self):
        # conflict error: the answer must report the locking tid
        conn = self._getConnection(uuid=self.getClientUUID())
        tid = self.getNextTID()
        locking_tid = self.getNextTID(tid)
        def fakeStoreObject(*args):
            raise ConflictError(locking_tid)
        self.app.tm.storeObject = fakeStoreObject
        oid, serial, comp, checksum, data = self._getObject()
        self.operation.askStoreObject(conn, oid, serial, comp, checksum,
            data, None, tid, False)
        pconflicting, poid, pserial = self.checkAnswerStoreObject(conn,
            decode=True)
        self.assertEqual(pconflicting, 1)
        self.assertEqual(poid, oid)
        self.assertEqual(pserial, locking_tid)

    def test_abortTransaction(self):
        conn = self._getConnection()
        tid = self.getNextTID()
        self.operation.abortTransaction(conn, tid)
        calls = self.app.tm.mockGetNamedCalls('abort')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(tid)

    def test_askObjectUndoSerial(self):
        conn = self._getConnection(uuid=self.getClientUUID())
        tid = self.getNextTID()
        ltid = self.getNextTID()
        undone_tid = self.getNextTID()
        # Keep 2 entries here, so we check findUndoTID is called only once.
        oid_list = [self.getOID(1), self.getOID(2)]
        obj2_data = [] # Marker
        self.app.tm = Mock({
            'getObjectFromTransaction': None,
        })
        self.app.dm = Mock({
            'findUndoTID': ReturnValues((None, None, False), )
        })
        self.operation.askObjectUndoSerial(conn, tid, ltid, undone_tid, oid_list)
        self.checkErrorPacket(conn)

    def test_askHasLock(self):
        tid_1 = self.getNextTID()
        tid_2 = self.getNextTID()
        oid = self.getNextTID()
        # late-binding closure: `locking_tid` is rebound by the loop below
        # before each askHasLock call
        def getLockingTID(oid):
            return locking_tid
        self.app.tm.getLockingTID = getLockingTID
        for locking_tid, status in (
                (None, LockState.NOT_LOCKED),
                (tid_1, LockState.GRANTED),
                (tid_2, LockState.GRANTED_TO_OTHER),
                ):
            conn = self._getConnection()
            self.operation.askHasLock(conn, tid_1, oid)
            p_oid, p_status = self.checkAnswerPacket(conn,
                Packets.AnswerHasLock, decode=True)
            self.assertEqual(oid, p_oid)
            self.assertEqual(status, p_status)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testIdentificationHandler.py 0000664 0000000 0000000 00000007424 12601037530 0032173 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes, NotReadyError, \
BrokenNodeDisallowedError
from neo.lib.pt import PartitionTable
from neo.storage.app import Application
from neo.storage.handlers.identification import IdentificationHandler
class StorageIdentificationHandlerTests(NeoUnitTestBase):
    """Tests for the storage node's IdentificationHandler."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        config = self.getStorageConfiguration(master_number=1)
        self.app = Application(config)
        self.app.name = 'NEO'
        self.app.ready = True
        self.app.pt = PartitionTable(4, 1)
        self.identification = IdentificationHandler(self.app)

    def _tearDown(self, success):
        self.app.close()
        del self.app
        super(StorageIdentificationHandlerTests, self)._tearDown(success)

    def test_requestIdentification1(self):
        """ nodes are rejected during election or if unknown storage """
        # not ready: even clients are rejected
        self.app.ready = False
        self.assertRaises(
            NotReadyError,
            self.identification.requestIdentification,
            self.getFakeConnection(),
            NodeTypes.CLIENT,
            self.getClientUUID(),
            None,
            self.app.name,
        )
        # ready, but an unknown storage is still rejected
        self.app.ready = True
        self.assertRaises(
            NotReadyError,
            self.identification.requestIdentification,
            self.getFakeConnection(),
            NodeTypes.STORAGE,
            self.getStorageUUID(),
            None,
            self.app.name,
        )

    def test_requestIdentification3(self):
        """ broken nodes must be rejected """
        uuid = self.getClientUUID()
        conn = self.getFakeConnection(uuid=uuid)
        node = self.app.nm.createClient(uuid=uuid)
        node.setBroken()
        self.assertRaises(BrokenNodeDisallowedError,
            self.identification.requestIdentification,
            conn,
            NodeTypes.CLIENT,
            uuid,
            None,
            self.app.name,
        )

    def test_requestIdentification2(self):
        """ accepted client must be connected and running """
        uuid = self.getClientUUID()
        conn = self.getFakeConnection(uuid=uuid)
        node = self.app.nm.createClient(uuid=uuid)
        master = (self.local_ip, 3000)
        self.app.master_node = Mock({
            'getAddress': master,
        })
        self.identification.requestIdentification(conn, NodeTypes.CLIENT, uuid,
            None, self.app.name)
        self.assertTrue(node.isRunning())
        self.assertTrue(node.isConnected())
        self.assertEqual(node.getUUID(), uuid)
        self.assertTrue(node.getConnection() is conn)
        # the storage must answer AcceptIdentification with its own data
        args = self.checkAcceptIdentification(conn, decode=True)
        node_type, address, _np, _nr, _uuid, _master, _master_list = args
        self.assertEqual(node_type, NodeTypes.STORAGE)
        self.assertEqual(address, None)
        self.assertEqual(_uuid, uuid)
        self.assertEqual(_master, master)
        # TODO: check _master_list ?
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testInitializationHandler.py 0000664 0000000 0000000 00000006717 12601037530 0032235 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from .. import NeoUnitTestBase
from neo.lib.pt import PartitionTable
from neo.storage.app import Application
from neo.storage.handlers.initialization import InitializationHandler
from neo.lib.protocol import CellStates
from neo.lib.exception import PrimaryFailure
class StorageInitializationHandlerTests(NeoUnitTestBase):
    """Tests for the storage node's InitializationHandler."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.prepareDatabase(number=1)
        # create an application object
        config = self.getStorageConfiguration(master_number=1)
        self.app = Application(config)
        self.verification = InitializationHandler(self.app)
        # define some variables to simulate client and storage nodes
        self.master_port = 10010
        self.storage_port = 10020
        self.client_port = 11011
        self.num_partitions = 1009
        self.num_replicas = 2
        self.app.operational = False
        self.app.load_lock_dict = {}
        self.app.pt = PartitionTable(self.num_partitions, self.num_replicas)

    def _tearDown(self, success):
        self.app.close()
        del self.app
        super(StorageInitializationHandlerTests, self)._tearDown(success)

    def getClientConnection(self):
        # fake connection from a client node
        address = ("127.0.0.1", self.client_port)
        return self.getFakeConnection(uuid=self.getClientUUID(),
            address=address)

    def test_03_connectionClosed(self):
        conn = self.getClientConnection()
        self.app.listening_conn = object() # mark as running
        # losing the connection during initialization must raise
        self.assertRaises(PrimaryFailure, self.verification.connectionClosed, conn,)
        # nothing happens
        self.checkNoPacketSent(conn)

    def test_09_answerPartitionTable(self):
        # send a table
        conn = self.getClientConnection()
        self.app.pt = PartitionTable(3, 2)
        node_1 = self.getStorageUUID()
        node_2 = self.getStorageUUID()
        node_3 = self.getStorageUUID()
        self.app.uuid = node_1
        # SN already know all nodes
        self.app.nm.createStorage(uuid=node_1)
        self.app.nm.createStorage(uuid=node_2)
        self.app.nm.createStorage(uuid=node_3)
        # the database starts with no partition table
        self.assertFalse(list(self.app.dm.getPartitionTable()))
        row_list = [(0, ((node_1, CellStates.UP_TO_DATE), (node_2, CellStates.UP_TO_DATE))),
                    (1, ((node_3, CellStates.UP_TO_DATE), (node_1, CellStates.UP_TO_DATE))),
                    (2, ((node_2, CellStates.UP_TO_DATE), (node_3, CellStates.UP_TO_DATE)))]
        self.assertFalse(self.app.pt.filled())
        # send a complete new table and ack
        self.verification.answerPartitionTable(conn, 2, row_list)
        self.assertTrue(self.app.pt.filled())
        self.assertEqual(self.app.pt.getID(), 2)
        # the table must also have been persisted to the database
        self.assertTrue(list(self.app.dm.getPartitionTable()))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testMasterHandler.py 0000664 0000000 0000000 00000014667 12601037530 0030504 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from collections import deque
from .. import NeoUnitTestBase
from neo.storage.app import Application
from neo.storage.handlers.master import MasterOperationHandler
from neo.lib.exception import PrimaryFailure, OperationFailure
from neo.lib.pt import PartitionTable
from neo.lib.protocol import CellStates, ProtocolError, Packets
class StorageMasterHandlerTests(NeoUnitTestBase):
    """Tests for MasterOperationHandler: the handler a storage node uses
    for packets received from the primary master during normal operation.
    """
    def setUp(self):
        """Build a storage Application wired to a fake primary master."""
        NeoUnitTestBase.setUp(self)
        self.prepareDatabase(number=1)
        # create an application object
        config = self.getStorageConfiguration(master_number=1)
        self.app = Application(config)
        self.app.transaction_dict = {}
        self.app.store_lock_dict = {}
        self.app.load_lock_dict = {}
        self.app.event_queue = deque()
        # handler under test
        self.operation = MasterOperationHandler(self.app)
        # set pmn (primary master node) with a known UUID
        self.master_uuid = self.getMasterUUID()
        pmn = self.app.nm.getMasterList()[0]
        pmn.setUUID(self.master_uuid)
        self.app.primary_master_node = pmn
        self.master_port = 10010
    def _tearDown(self, success):
        # Release the application's resources before base-class teardown.
        self.app.close()
        del self.app
        super(StorageMasterHandlerTests, self)._tearDown(success)
    def getMasterConnection(self):
        """Return a fake connection that appears to come from the master."""
        address = ("127.0.0.1", self.master_port)
        return self.getFakeConnection(uuid=self.master_uuid, address=address)
    def test_07_connectionClosed2(self):
        # primary has closed the connection: the handler must raise
        # PrimaryFailure without sending any packet back.
        conn = self.getMasterConnection()
        self.app.listening_conn = object() # mark as running
        self.assertRaises(PrimaryFailure, self.operation.connectionClosed, conn)
        self.checkNoPacketSent(conn)
    def test_14_notifyPartitionChanges1(self):
        # old partition change (ptid 0 <= current id 1) -> do nothing
        app = self.app
        conn = self.getMasterConnection()
        app.replicator = Mock({})
        self.app.pt = Mock({'getID': 1})
        count = len(self.app.nm.getList())
        self.operation.notifyPartitionChanges(conn, 0, ())
        # table id unchanged, no node created, replicator untouched
        self.assertEqual(self.app.pt.getID(), 1)
        self.assertEqual(len(self.app.nm.getList()), count)
        calls = self.app.replicator.mockGetNamedCalls('removePartition')
        self.assertEqual(len(calls), 0)
        calls = self.app.replicator.mockGetNamedCalls('addPartition')
        self.assertEqual(len(calls), 0)
    def test_14_notifyPartitionChanges2(self):
        # cases : one cell per state (up-to-date, discarded, out-of-date)
        uuid1, uuid2, uuid3 = [self.getStorageUUID() for i in range(3)]
        cells = (
            (0, uuid1, CellStates.UP_TO_DATE),
            (1, uuid2, CellStates.DISCARDED),
            (2, uuid3, CellStates.OUT_OF_DATE),
        )
        # context
        conn = self.getMasterConnection()
        app = self.app
        # register nodes
        app.nm.createStorage(uuid=uuid1)
        app.nm.createStorage(uuid=uuid2)
        app.nm.createStorage(uuid=uuid3)
        ptid1, ptid2 = (1, 2)
        self.assertNotEqual(ptid1, ptid2)
        app.pt = PartitionTable(3, 1)
        app.dm = Mock({ })
        app.replicator = Mock({})
        self.operation.notifyPartitionChanges(conn, ptid2, cells)
        # ptid set to the newer value
        self.assertEqual(app.pt.getID(), ptid2)
        # dm call: the new table must be persisted exactly once
        calls = self.app.dm.mockGetNamedCalls('changePartitionTable')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(ptid2, cells)
    def test_16_stopOperation1(self):
        # OperationFailure must be raised when the master asks to stop
        conn = self.getFakeConnection(is_server=False)
        self.assertRaises(OperationFailure, self.operation.stopOperation, conn)
    def _getConnection(self):
        # Shortcut for a plain anonymous fake connection.
        return self.getFakeConnection()
    def test_askLockInformation1(self):
        """ Unknown transaction: locking must be refused with ProtocolError """
        self.app.tm = Mock({'__contains__': False})
        conn = self._getConnection()
        oid_list = [self.getOID(1), self.getOID(2)]
        tid = self.getNextTID()
        ttid = self.getNextTID()
        handler = self.operation
        self.assertRaises(ProtocolError, handler.askLockInformation, conn,
            ttid, tid, oid_list)
    def test_askLockInformation2(self):
        """ Lock transaction: forwarded to the transaction manager and
        acknowledged to the master """
        self.app.tm = Mock({'__contains__': True})
        conn = self._getConnection()
        tid = self.getNextTID()
        ttid = self.getNextTID()
        oid_list = [self.getOID(1), self.getOID(2)]
        self.operation.askLockInformation(conn, ttid, tid, oid_list)
        calls = self.app.tm.mockGetNamedCalls('lock')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(ttid, tid, oid_list)
        self.checkAnswerInformationLocked(conn)
    def test_notifyUnlockInformation1(self):
        """ Unknown transaction: unlocking must be refused with ProtocolError """
        self.app.tm = Mock({'__contains__': False})
        conn = self._getConnection()
        tid = self.getNextTID()
        handler = self.operation
        self.assertRaises(ProtocolError, handler.notifyUnlockInformation,
            conn, tid)
    def test_notifyUnlockInformation2(self):
        """ Unlock transaction: forwarded to the transaction manager,
        no answer expected (it is a notification) """
        self.app.tm = Mock({'__contains__': True})
        conn = self._getConnection()
        tid = self.getNextTID()
        self.operation.notifyUnlockInformation(conn, tid)
        calls = self.app.tm.mockGetNamedCalls('unlock')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(tid)
        self.checkNoPacketSent(conn)
    def test_askPack(self):
        # A pack request is forwarded to the database manager and answered.
        self.app.dm = Mock({'pack': None})
        conn = self.getFakeConnection()
        tid = self.getNextTID()
        self.operation.askPack(conn, tid)
        calls = self.app.dm.mockGetNamedCalls('pack')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(tid, self.app.tm.updateObjectDataForPack)
        # Content has no meaning here, don't check.
        self.checkAnswerPacket(conn, Packets.AnswerPack)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testStorageApp.py 0000664 0000000 0000000 00000015151 12601037530 0030005 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from mock import Mock, ReturnValues
from .. import NeoUnitTestBase
from neo.storage.app import Application
from neo.lib.protocol import CellStates
from collections import deque
from neo.lib.pt import PartitionTable
from neo.storage.exception import AlreadyPendingError
class StorageAppTests(NeoUnitTestBase):
    """Tests for the storage Application object itself: partition-table
    loading and the pending-event queue.
    """
    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.prepareDatabase(number=1)
        # create an application object
        config = self.getStorageConfiguration(master_number=1)
        self.app = Application(config)
        self.app.event_queue = deque()
        self.app.event_queue_dict = {}
    def _tearDown(self, success):
        # Release the application's resources before base-class teardown.
        self.app.close()
        del self.app
        super(StorageAppTests, self)._tearDown(success)
    def test_01_loadPartitionTable(self):
        """loadPartitionTable must rebuild the in-memory table from the
        database manager, dropping any previous in-memory state."""
        self.app.dm = Mock({
            'getPartitionTable': [],
        })
        self.assertEqual(self.app.pt, None)
        num_partitions = 3
        num_replicas = 2
        self.app.pt = PartitionTable(num_partitions, num_replicas)
        self.assertFalse(self.app.pt.getNodeSet())
        self.assertFalse(self.app.pt.filled())
        for x in xrange(num_partitions):
            self.assertFalse(self.app.pt.hasOffset(x))
        # load an empty table
        self.app.loadPartitionTable()
        self.assertFalse(self.app.pt.getNodeSet())
        self.assertFalse(self.app.pt.filled())
        for x in xrange(num_partitions):
            self.assertFalse(self.app.pt.hasOffset(x))
        # add some nodes; they will be removed when the table is reloaded
        master_uuid = self.getMasterUUID()
        master = self.app.nm.createMaster(uuid=master_uuid)
        storage_uuid = self.getStorageUUID()
        storage = self.app.nm.createStorage(uuid=storage_uuid)
        client_uuid = self.getClientUUID()
        self.app.pt.setCell(0, master, CellStates.UP_TO_DATE)
        self.app.pt.setCell(0, storage, CellStates.UP_TO_DATE)
        self.assertEqual(len(self.app.pt.getNodeSet()), 2)
        self.assertFalse(self.app.pt.filled())
        for x in xrange(num_partitions):
            if x == 0:
                self.assertTrue(self.app.pt.hasOffset(x))
            else:
                self.assertFalse(self.app.pt.hasOffset(x))
        # load an empty table, everything removed
        self.app.loadPartitionTable()
        self.assertFalse(self.app.pt.getNodeSet())
        self.assertFalse(self.app.pt.filled())
        for x in xrange(num_partitions):
            self.assertFalse(self.app.pt.hasOffset(x))
        # add some node
        self.app.pt.setCell(0, master, CellStates.UP_TO_DATE)
        self.app.pt.setCell(0, storage, CellStates.UP_TO_DATE)
        self.assertEqual(len(self.app.pt.getNodeSet()), 2)
        self.assertFalse(self.app.pt.filled())
        for x in xrange(num_partitions):
            if x == 0:
                self.assertTrue(self.app.pt.hasOffset(x))
            else:
                self.assertFalse(self.app.pt.hasOffset(x))
        # fill partition table with a non-empty set of cells from the dm
        self.app.dm = Mock({
            'getPartitionTable': [
                (0, client_uuid, CellStates.UP_TO_DATE),
                (1, client_uuid, CellStates.UP_TO_DATE),
                (1, storage_uuid, CellStates.UP_TO_DATE),
                (2, storage_uuid, CellStates.UP_TO_DATE),
                (2, master_uuid, CellStates.UP_TO_DATE),
            ],
            'getPTID': 1,
        })
        self.app.pt.clear()
        self.app.loadPartitionTable()
        self.assertTrue(self.app.pt.filled())
        for x in xrange(num_partitions):
            self.assertTrue(self.app.pt.hasOffset(x))
        # check each row
        cell_list = self.app.pt.getCellList(0)
        self.assertEqual(len(cell_list), 1)
        self.assertEqual(cell_list[0].getUUID(), client_uuid)
        cell_list = self.app.pt.getCellList(1)
        self.assertEqual(len(cell_list), 2)
        self.assertTrue(cell_list[0].getUUID() in (client_uuid, storage_uuid))
        self.assertTrue(cell_list[1].getUUID() in (client_uuid, storage_uuid))
        cell_list = self.app.pt.getCellList(2)
        self.assertEqual(len(cell_list), 2)
        self.assertTrue(cell_list[0].getUUID() in (master_uuid, storage_uuid))
        self.assertTrue(cell_list[1].getUUID() in (master_uuid, storage_uuid))
    def test_02_queueEvent(self):
        """queueEvent must record (key, event, msg_id, conn, args) and, by
        default, refuse a duplicate key unless raise_on_duplicate=False."""
        self.assertEqual(len(self.app.event_queue), 0)
        msg_id = 1325136
        event = Mock({'__repr__': 'event'})
        conn = Mock({'__repr__': 'conn', 'getPeerId': msg_id})
        key = 'foo'
        self.app.queueEvent(event, conn, ("test", ), key=key)
        self.assertEqual(len(self.app.event_queue), 1)
        _key, _event, _msg_id, _conn, args = self.app.event_queue[0]
        self.assertEqual(key, _key)
        self.assertEqual(msg_id, _msg_id)
        self.assertEqual(len(args), 1)
        self.assertEqual(args[0], "test")
        # same key again -> AlreadyPendingError, queue unchanged
        self.assertRaises(AlreadyPendingError, self.app.queueEvent, event,
            conn, ("test2", ), key=key)
        self.assertEqual(len(self.app.event_queue), 1)
        # same key with raise_on_duplicate=False -> accepted
        self.app.queueEvent(event, conn, ("test3", ), key=key,
            raise_on_duplicate=False)
        self.assertEqual(len(self.app.event_queue), 2)
    def test_03_executeQueuedEvents(self):
        """executeQueuedEvents must replay the event with its recorded
        arguments and restore the connection's peer id around the call."""
        self.assertEqual(len(self.app.event_queue), 0)
        msg_id = 1325136
        msg_id_2 = 1325137
        event = Mock({'__repr__': 'event'})
        conn = Mock({'__repr__': 'conn', 'getPeerId': ReturnValues(msg_id, msg_id_2)})
        self.app.queueEvent(event, conn, ("test", ))
        self.app.executeQueuedEvents()
        self.assertEqual(len(event.mockGetNamedCalls("__call__")), 1)
        call = event.mockGetNamedCalls("__call__")[0]
        params = call.getParam(1)
        self.assertEqual(params, "test")
        params = call.kwparams
        self.assertEqual(params, {})
        # setPeerId called once to replay the event, once to restore
        calls = conn.mockGetNamedCalls("setPeerId")
        self.assertEqual(len(calls), 2)
        calls[0].checkArgs(msg_id)
        calls[1].checkArgs(msg_id_2)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testStorageDBTests.py 0000664 0000000 0000000 00000060672 12601037530 0030605 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from binascii import a2b_hex
import unittest
from neo.lib.util import add64, p64, u64
from neo.lib.protocol import CellStates, ZERO_HASH, ZERO_OID, ZERO_TID, MAX_TID
from .. import NeoUnitTestBase
class StorageDBTests(NeoUnitTestBase):
    """Abstract test suite for storage database managers.

    Concrete subclasses (MySQL, SQLite, ...) only provide getDB();
    every test here must pass identically on all backends. The module
    that subclasses this is expected to `del StorageDBTests` afterwards
    so unittest does not try to run the abstract base directly.
    """
    # ttid of the last transaction built by getTransaction()
    _last_ttid = ZERO_TID
    def setUp(self):
        NeoUnitTestBase.setUp(self)
    @property
    def db(self):
        # Lazily create a single-partition database on first access.
        try:
            return self._db
        except AttributeError:
            self.setNumPartitions(1)
            return self._db
    def _tearDown(self, success):
        # Close the database if one was created. The previous idiom,
        # self.__dict__.pop('_db', None).close() wrapped in a blanket
        # "except AttributeError", also swallowed AttributeError raised
        # from *inside* close(), hiding real teardown bugs; test the
        # None case explicitly instead.
        db = self.__dict__.pop('_db', None)
        if db is not None:
            db.close()
        NeoUnitTestBase._tearDown(self, success)
    def getDB(self, reset=0):
        """Return a backend-specific database manager (subclass hook)."""
        raise NotImplementedError
    def setNumPartitions(self, num_partitions, reset=0):
        """(Re)configure the database with `num_partitions` partitions.

        Creates the database on first call; on later calls, resets it or
        drops partitions as needed, then assigns every partition to a
        single freshly-generated storage UUID.
        """
        try:
            db = self._db
        except AttributeError:
            self._db = db = self.getDB(reset)
        else:
            if reset:
                db.setup(reset)
            else:
                try:
                    n = db.getNumPartitions()
                except KeyError:
                    n = 0
                if num_partitions == n:
                    return
                if num_partitions < n:
                    db.dropPartitions(n)
        db.setNumPartitions(num_partitions)
        self.assertEqual(num_partitions, db.getNumPartitions())
        uuid = self.getStorageUUID()
        db.setUUID(uuid)
        self.assertEqual(uuid, db.getUUID())
        db.changePartitionTable(1,
            [(i, uuid, CellStates.UP_TO_DATE) for i in xrange(num_partitions)],
            reset=True)
    def checkConfigEntry(self, get_call, set_call, value):
        # generic test for all configuration entries accessors:
        # unset -> None, then set and overwrite must round-trip
        self.assertEqual(get_call(), None)
        set_call(value)
        self.assertEqual(get_call(), value)
        set_call(value * 2)
        self.assertEqual(get_call(), value * 2)
    def test_UUID(self):
        db = self.getDB()
        self.checkConfigEntry(db.getUUID, db.setUUID, 123)
    def test_Name(self):
        db = self.getDB()
        self.checkConfigEntry(db.getName, db.setName, 'TEST_NAME')
    def test_15_PTID(self):
        db = self.getDB()
        self.checkConfigEntry(db.getPTID, db.setPTID, self.getPTID(1))
    def test_getPartitionTable(self):
        db = self.getDB()
        ptid = self.getPTID(1)
        uuid1, uuid2 = self.getStorageUUID(), self.getStorageUUID()
        cell1 = (0, uuid1, CellStates.OUT_OF_DATE)
        cell2 = (1, uuid1, CellStates.UP_TO_DATE)
        db.changePartitionTable(ptid, [cell1, cell2], 1)
        result = db.getPartitionTable()
        self.assertEqual(set(result), {cell1, cell2})
    def getOIDs(self, count):
        """Return `count` consecutive OIDs (a list under Python 2)."""
        return map(self.getOID, xrange(count))
    def getTIDs(self, count):
        """Return `count` strictly increasing TIDs."""
        tid_list = [self.getNextTID()]
        while len(tid_list) != count:
            tid_list.append(self.getNextTID(tid_list[-1]))
        return tid_list
    def getTransaction(self, oid_list):
        """Build a (transaction, object_list) pair touching `oid_list`.

        Uses a fresh, monotonically increasing ttid and a dummy data
        record (checksum of 20 '0' bytes) held in the database.
        """
        self._last_ttid = ttid = add64(self._last_ttid, 1)
        transaction = oid_list, 'user', 'desc', 'ext', False, ttid
        H = "0" * 20
        object_list = [(oid, self.db.holdData(H, '', 1), None)
            for oid in oid_list]
        return (transaction, object_list)
    def checkSet(self, list1, list2):
        # order-independent comparison of two sequences
        self.assertEqual(set(list1), set(list2))
    def test_getLastIDs(self):
        tid1, tid2, tid3, tid4 = self.getTIDs(4)
        oid1, oid2 = self.getOIDs(2)
        txn, objs = self.getTransaction([oid1, oid2])
        # two finished transactions
        self.db.storeTransaction(tid1, objs, txn, False)
        self.db.storeTransaction(tid2, objs, txn, False)
        self.assertEqual(self.db.getLastIDs(),
            (tid2, {0: tid2}, {0: tid2}, oid2))
        # an unfinished one appears under the None key
        self.db.storeTransaction(tid3, objs, txn)
        tids = {0: tid2, None: tid3}
        self.assertEqual(self.db.getLastIDs(), (tid3, tids, tids, oid2))
        # objects without transaction metadata still update the obj side
        self.db.storeTransaction(tid4, objs, None)
        self.assertEqual(self.db.getLastIDs(),
            (tid4, tids, {0: tid2, None: tid4}, oid2))
        # finishing tid3 moves it from the temporary to the final tables
        self.db.finishTransaction(tid3)
        self.assertEqual(self.db.getLastIDs(),
            (tid4, {0: tid3}, {0: tid3, None: tid4}, oid2))
    def test_getUnfinishedTIDList(self):
        tid1, tid2, tid3, tid4 = self.getTIDs(4)
        oid1, oid2 = self.getOIDs(2)
        txn, objs = self.getTransaction([oid1, oid2])
        # nothing pending
        self.db.storeTransaction(tid1, objs, txn, False)
        self.checkSet(self.db.getUnfinishedTIDList(), [])
        # one unfinished txn
        self.db.storeTransaction(tid2, objs, txn)
        self.checkSet(self.db.getUnfinishedTIDList(), [tid2])
        # no changes
        self.db.storeTransaction(tid3, objs, None, False)
        self.checkSet(self.db.getUnfinishedTIDList(), [tid2])
        # a second txn known by objs only
        self.db.storeTransaction(tid4, objs, None)
        self.checkSet(self.db.getUnfinishedTIDList(), [tid2, tid4])
    def test_objectPresent(self):
        tid = self.getNextTID()
        oid = self.getOID(1)
        txn, objs = self.getTransaction([oid])
        # not present
        self.assertFalse(self.db.objectPresent(oid, tid, all=True))
        self.assertFalse(self.db.objectPresent(oid, tid, all=False))
        # available in temp table only (all=True sees it)
        self.db.storeTransaction(tid, objs, txn)
        self.assertTrue(self.db.objectPresent(oid, tid, all=True))
        self.assertFalse(self.db.objectPresent(oid, tid, all=False))
        # available in both tables
        self.db.finishTransaction(tid)
        self.assertTrue(self.db.objectPresent(oid, tid, all=True))
        self.assertTrue(self.db.objectPresent(oid, tid, all=False))
    def test_getObject(self):
        oid1, = self.getOIDs(1)
        tid1, tid2 = self.getTIDs(2)
        # getObject returns False when the oid exists but no revision is
        # visible at the requested tid
        FOUND_BUT_NOT_VISIBLE = False
        OBJECT_T1_NO_NEXT = (tid1, None, 1, "0"*20, '', None)
        OBJECT_T1_NEXT = (tid1, tid2, 1, "0"*20, '', None)
        OBJECT_T2 = (tid2, None, 1, "0"*20, '', None)
        txn1, objs1 = self.getTransaction([oid1])
        txn2, objs2 = self.getTransaction([oid1])
        # non-present
        self.assertEqual(self.db.getObject(oid1), None)
        self.assertEqual(self.db.getObject(oid1, tid1), None)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid1), None)
        # one non-commited version: still invisible
        self.db.storeTransaction(tid1, objs1, txn1)
        self.assertEqual(self.db.getObject(oid1), None)
        self.assertEqual(self.db.getObject(oid1, tid1), None)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid1), None)
        # one commited version
        self.db.finishTransaction(tid1)
        self.assertEqual(self.db.getObject(oid1), OBJECT_T1_NO_NEXT)
        self.assertEqual(self.db.getObject(oid1, tid1), OBJECT_T1_NO_NEXT)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid1),
            FOUND_BUT_NOT_VISIBLE)
        # two version available, one non-commited
        self.db.storeTransaction(tid2, objs2, txn2)
        self.assertEqual(self.db.getObject(oid1), OBJECT_T1_NO_NEXT)
        self.assertEqual(self.db.getObject(oid1, tid1), OBJECT_T1_NO_NEXT)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid1),
            FOUND_BUT_NOT_VISIBLE)
        self.assertEqual(self.db.getObject(oid1, tid2), FOUND_BUT_NOT_VISIBLE)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid2),
            OBJECT_T1_NO_NEXT)
        # two commited versions
        self.db.finishTransaction(tid2)
        self.assertEqual(self.db.getObject(oid1), OBJECT_T2)
        self.assertEqual(self.db.getObject(oid1, tid1), OBJECT_T1_NEXT)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid1),
            FOUND_BUT_NOT_VISIBLE)
        self.assertEqual(self.db.getObject(oid1, tid2), OBJECT_T2)
        self.assertEqual(self.db.getObject(oid1, before_tid=tid2),
            OBJECT_T1_NEXT)
    def test_setPartitionTable(self):
        # changePartitionTable with reset=1 replaces the whole table
        db = self.getDB()
        ptid = self.getPTID(1)
        uuid = self.getStorageUUID()
        cell1 = 0, uuid, CellStates.OUT_OF_DATE
        cell2 = 1, uuid, CellStates.UP_TO_DATE
        cell3 = 1, uuid, CellStates.DISCARDED
        # no partition table
        self.assertEqual(list(db.getPartitionTable()), [])
        # set one
        db.changePartitionTable(ptid, [cell1], 1)
        result = db.getPartitionTable()
        self.assertEqual(list(result), [cell1])
        # then another (previous content replaced)
        db.changePartitionTable(ptid, [cell2], 1)
        result = db.getPartitionTable()
        self.assertEqual(list(result), [cell2])
        # drop discarded cells
        db.changePartitionTable(ptid, [cell2, cell3], 1)
        result = db.getPartitionTable()
        self.assertEqual(list(result), [])
    def test_changePartitionTable(self):
        # without reset, changePartitionTable merges into the existing table
        db = self.getDB()
        ptid = self.getPTID(1)
        uuid = self.getStorageUUID()
        cell1 = 0, uuid, CellStates.OUT_OF_DATE
        cell2 = 1, uuid, CellStates.UP_TO_DATE
        cell3 = 1, uuid, CellStates.DISCARDED
        # no partition table
        self.assertEqual(list(db.getPartitionTable()), [])
        # set one
        db.changePartitionTable(ptid, [cell1])
        result = db.getPartitionTable()
        self.assertEqual(list(result), [cell1])
        # add more entries
        db.changePartitionTable(ptid, [cell2])
        result = db.getPartitionTable()
        self.assertEqual(set(result), {cell1, cell2})
        # drop discarded cells
        db.changePartitionTable(ptid, [cell2, cell3])
        result = db.getPartitionTable()
        self.assertEqual(list(result), [cell1])
    def test_dropUnfinishedData(self):
        oid1, oid2 = self.getOIDs(2)
        tid1, tid2 = self.getTIDs(2)
        txn1, objs1 = self.getTransaction([oid1])
        txn2, objs2 = self.getTransaction([oid1])
        # nothing
        self.assertEqual(self.db.getObject(oid1), None)
        self.assertEqual(self.db.getObject(oid2), None)
        self.assertEqual(self.db.getUnfinishedTIDList(), [])
        # one is still pending
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.storeTransaction(tid2, objs2, txn2)
        self.db.finishTransaction(tid1)
        result = self.db.getObject(oid1)
        self.assertEqual(result, (tid1, None, 1, "0"*20, '', None))
        self.assertEqual(self.db.getObject(oid2), None)
        self.assertEqual(self.db.getUnfinishedTIDList(), [tid2])
        # drop it: only the pending transaction disappears
        self.db.dropUnfinishedData()
        self.assertEqual(self.db.getUnfinishedTIDList(), [])
        result = self.db.getObject(oid1)
        self.assertEqual(result, (tid1, None, 1, "0"*20, '', None))
        self.assertEqual(self.db.getObject(oid2), None)
    def test_storeTransaction(self):
        oid1, oid2 = self.getOIDs(2)
        tid1, tid2 = self.getTIDs(2)
        txn1, objs1 = self.getTransaction([oid1])
        txn2, objs2 = self.getTransaction([oid2])
        # nothing in database
        self.assertEqual(self.db.getLastIDs(), (None, {}, {}, None))
        self.assertEqual(self.db.getUnfinishedTIDList(), [])
        self.assertEqual(self.db.getObject(oid1), None)
        self.assertEqual(self.db.getObject(oid2), None)
        self.assertEqual(self.db.getTransaction(tid1, True), None)
        self.assertEqual(self.db.getTransaction(tid2, True), None)
        self.assertEqual(self.db.getTransaction(tid1, False), None)
        self.assertEqual(self.db.getTransaction(tid2, False), None)
        # store in temporary tables (visible with all=True only)
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.storeTransaction(tid2, objs2, txn2)
        result = self.db.getTransaction(tid1, True)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, True)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
        self.assertEqual(self.db.getTransaction(tid1, False), None)
        self.assertEqual(self.db.getTransaction(tid2, False), None)
        # commit pending transaction: visible in both modes
        self.db.finishTransaction(tid1)
        self.db.finishTransaction(tid2)
        result = self.db.getTransaction(tid1, True)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, True)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
        result = self.db.getTransaction(tid1, False)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, False)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
    def test_askFinishTransaction(self):
        oid1, oid2 = self.getOIDs(2)
        tid1, tid2 = self.getTIDs(2)
        txn1, objs1 = self.getTransaction([oid1])
        txn2, objs2 = self.getTransaction([oid2])
        # stored but not finished
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.storeTransaction(tid2, objs2, txn2)
        result = self.db.getTransaction(tid1, True)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, True)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
        self.assertEqual(self.db.getTransaction(tid1, False), None)
        self.assertEqual(self.db.getTransaction(tid2, False), None)
        # stored and finished
        self.db.finishTransaction(tid1)
        self.db.finishTransaction(tid2)
        result = self.db.getTransaction(tid1, True)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, True)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
        result = self.db.getTransaction(tid1, False)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, False)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
    def test_deleteTransaction(self):
        # deletion works on both finished (tid1) and pending (tid2) txns
        oid1, oid2 = self.getOIDs(2)
        tid1, tid2 = self.getTIDs(2)
        txn1, objs1 = self.getTransaction([oid1])
        txn2, objs2 = self.getTransaction([oid2])
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.storeTransaction(tid2, objs2, txn2)
        self.db.finishTransaction(tid1)
        self.db.deleteTransaction(tid1, [oid1])
        self.db.deleteTransaction(tid2, [oid2])
        self.assertEqual(self.db.getTransaction(tid1, True), None)
        self.assertEqual(self.db.getTransaction(tid2, True), None)
    def test_deleteObject(self):
        oid1, oid2 = self.getOIDs(2)
        tid1, tid2 = self.getTIDs(2)
        txn1, objs1 = self.getTransaction([oid1, oid2])
        txn2, objs2 = self.getTransaction([oid1, oid2])
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.storeTransaction(tid2, objs2, txn2)
        self.db.finishTransaction(tid1)
        self.db.finishTransaction(tid2)
        # without a serial, every revision of the object is removed
        self.db.deleteObject(oid1)
        self.assertEqual(self.db.getObject(oid1, tid=tid1), None)
        self.assertEqual(self.db.getObject(oid1, tid=tid2), None)
        # with a serial, only that revision is removed
        self.db.deleteObject(oid2, serial=tid1)
        self.assertFalse(self.db.getObject(oid2, tid=tid1))
        self.assertEqual(self.db.getObject(oid2, tid=tid2),
            (tid2, None, 1, "0" * 20, '', None))
    def test_deleteRange(self):
        np = 4
        self.setNumPartitions(np)
        t1, t2, t3 = map(self.getOID, (1, 2, 3))
        oid_list = self.getOIDs(np * 2)
        for tid in t1, t2, t3:
            txn, objs = self.getTransaction(oid_list)
            self.db.storeTransaction(tid, objs, txn)
            self.db.finishTransaction(tid)
        def check(offset, tid_list, *tids):
            # verify both the transaction and object replication views of
            # partition `offset` after a _deleteRange call
            self.assertEqual(self.db.getReplicationTIDList(ZERO_TID,
                MAX_TID, len(tid_list) + 1, offset), tid_list)
            expected = [(t, oid_list[offset+i]) for t in tids for i in (0, np)]
            self.assertEqual(self.db.getReplicationObjectList(ZERO_TID,
                MAX_TID, len(expected) + 1, offset, ZERO_OID), expected)
        # boundary values must delete nothing
        self.db._deleteRange(0, MAX_TID)
        self.db._deleteRange(0, max_tid=ZERO_TID)
        check(0, [], t1, t2, t3)
        self.db._deleteRange(0);             check(0, [])
        self.db._deleteRange(1, t2);         check(1, [t1], t1, t2)
        self.db._deleteRange(2, max_tid=t2); check(2, [], t3)
        self.db._deleteRange(3, t1, t2);     check(3, [t3], t1, t3)
    def test_getTransaction(self):
        oid1, oid2 = self.getOIDs(2)
        tid1, tid2 = self.getTIDs(2)
        txn1, objs1 = self.getTransaction([oid1])
        txn2, objs2 = self.getTransaction([oid2])
        # get from temporary table or not
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.storeTransaction(tid2, objs2, txn2)
        self.db.finishTransaction(tid1)
        result = self.db.getTransaction(tid1, True)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        result = self.db.getTransaction(tid2, True)
        self.assertEqual(result, ([oid2], 'user', 'desc', 'ext', False, p64(2)))
        # get from non-temporary only
        result = self.db.getTransaction(tid1, False)
        self.assertEqual(result, ([oid1], 'user', 'desc', 'ext', False, p64(1)))
        self.assertEqual(self.db.getTransaction(tid2, False), None)
    def test_getObjectHistory(self):
        oid = self.getOID(1)
        tid1, tid2, tid3 = self.getTIDs(3)
        txn1, objs1 = self.getTransaction([oid])
        txn2, objs2 = self.getTransaction([oid])
        txn3, objs3 = self.getTransaction([oid])
        # one revision
        self.db.storeTransaction(tid1, objs1, txn1)
        self.db.finishTransaction(tid1)
        result = self.db.getObjectHistory(oid, 0, 3)
        self.assertEqual(result, [(tid1, 0)])
        result = self.db.getObjectHistory(oid, 1, 1)
        self.assertEqual(result, None)
        # two revisions, newest first
        self.db.storeTransaction(tid2, objs2, txn2)
        self.db.finishTransaction(tid2)
        result = self.db.getObjectHistory(oid, 0, 3)
        self.assertEqual(result, [(tid2, 0), (tid1, 0)])
        result = self.db.getObjectHistory(oid, 1, 3)
        self.assertEqual(result, [(tid1, 0)])
        result = self.db.getObjectHistory(oid, 2, 3)
        self.assertEqual(result, None)
    def _storeTransactions(self, count):
        # use OID generator to know result of tid % N
        tid_list = self.getOIDs(count)
        oid = self.getOID(1)
        for tid in tid_list:
            txn, objs = self.getTransaction([oid])
            self.db.storeTransaction(tid, objs, txn)
            self.db.finishTransaction(tid)
        return tid_list
    def test_getTIDList(self):
        self.setNumPartitions(2, True)
        tid1, tid2, tid3, tid4 = self._storeTransactions(4)
        # get tids
        # - all partitions
        result = self.db.getTIDList(0, 4, [0, 1])
        self.checkSet(result, [tid1, tid2, tid3, tid4])
        # - one partition
        result = self.db.getTIDList(0, 4, [0])
        self.checkSet(result, [tid1, tid3])
        result = self.db.getTIDList(0, 4, [1])
        self.checkSet(result, [tid2, tid4])
        # get a subset of tids
        result = self.db.getTIDList(0, 1, [0])
        self.checkSet(result, [tid3]) # desc order
        result = self.db.getTIDList(1, 1, [1])
        self.checkSet(result, [tid2])
        result = self.db.getTIDList(2, 2, [0])
        self.checkSet(result, [])
    def test_getReplicationTIDList(self):
        self.setNumPartitions(2, True)
        tid1, tid2, tid3, tid4 = self._storeTransactions(4)
        # - one partition
        result = self.db.getReplicationTIDList(ZERO_TID, MAX_TID, 10, 0)
        self.checkSet(result, [tid1, tid3])
        # - another partition
        result = self.db.getReplicationTIDList(ZERO_TID, MAX_TID, 10, 1)
        self.checkSet(result, [tid2, tid4])
        # - min_tid is inclusive
        result = self.db.getReplicationTIDList(tid3, MAX_TID, 10, 0)
        self.checkSet(result, [tid3])
        # - max tid is inclusive
        result = self.db.getReplicationTIDList(ZERO_TID, tid2, 10, 0)
        self.checkSet(result, [tid1])
        # - limit
        result = self.db.getReplicationTIDList(ZERO_TID, MAX_TID, 1, 0)
        self.checkSet(result, [tid1])
    def test_checkRange(self):
        # the expected sha1 digests below are fixed by the deterministic
        # TID sequence produced by _storeTransactions
        def check(trans, obj, *args):
            self.assertEqual(trans, self.db.checkTIDRange(*args))
            self.assertEqual(obj, self.db.checkSerialRange(*(args+(ZERO_OID,))))
        self.setNumPartitions(2, True)
        tid1, tid2, tid3, tid4 = self._storeTransactions(4)
        z = 0, ZERO_HASH, ZERO_TID, ZERO_HASH, ZERO_OID
        # - one partition
        check((2, a2b_hex('84320eb8dbbe583f67055c15155ab6794f11654d'), tid3),
              z,
              0, 10, ZERO_TID, MAX_TID)
        # - another partition
        check((2, a2b_hex('1f02f98cf775a9e0ce9252ff5972dce728c4ddb0'), tid4),
              (4, a2b_hex('e5b47bddeae2096220298df686737d939a27d736'), tid4,
                  a2b_hex('1e9093698424b5370e19acd2d5fc20dcd56a32cd'), p64(1)),
              1, 10, ZERO_TID, MAX_TID)
        self.assertEqual(
            (3, a2b_hex('b85e2d4914e22b5ad3b82b312b3dc405dc17dcb8'), tid4,
                a2b_hex('1b6d73ecdc064595fe915a5c26da06b195caccaa'), p64(1)),
            self.db.checkSerialRange(1, 10, ZERO_TID, MAX_TID, p64(2)))
        # - min_tid is inclusive
        check((1, a2b_hex('da4b9237bacccdf19c0760cab7aec4a8359010b0'), tid3),
              z,
              0, 10, tid3, MAX_TID)
        # - max tid is inclusive
        x = 1, a2b_hex('b6589fc6ab0dc82cf12099d1c2d40ab994e8410c'), tid1
        check(x, z, 0, 10, ZERO_TID, tid2)
        # - limit
        y = 1, a2b_hex('356a192b7913b04c54574d18c28d46e6395428ab'), tid2
        check(y, x + y[1:], 1, 1, ZERO_TID, MAX_TID)
    def test_findUndoTID(self):
        self.setNumPartitions(4, True)
        db = self.db
        tid1 = self.getNextTID()
        tid2 = self.getNextTID()
        tid3 = self.getNextTID()
        tid4 = self.getNextTID()
        tid5 = self.getNextTID()
        oid1 = self.getOID(1)
        foo = db.holdData("3" * 20, 'foo', 0)
        bar = db.holdData("4" * 20, 'bar', 0)
        db.releaseData((foo, bar))
        db.storeTransaction(
            tid1, (
                (oid1, foo, None),
            ), None, temporary=False)
        # Undoing oid1 tid1, OK: tid1 is latest
        # Result: current tid is tid1, data_tid is None (undoing object
        # creation)
        self.assertEqual(
            db.findUndoTID(oid1, tid5, tid4, tid1, None),
            (tid1, None, True))
        # Store a new transaction
        db.storeTransaction(
            tid2, (
                (oid1, bar, None),
            ), None, temporary=False)
        # Undoing oid1 tid2, OK: tid2 is latest
        # Result: current tid is tid2, data_tid is tid1
        self.assertEqual(
            db.findUndoTID(oid1, tid5, tid4, tid2, None),
            (tid2, tid1, True))
        # Undoing oid1 tid1, Error: tid2 is latest
        # Result: current tid is tid2, data_tid is -1
        self.assertEqual(
            db.findUndoTID(oid1, tid5, tid4, tid1, None),
            (tid2, None, False))
        # Undoing oid1 tid1 with tid2 being undone in same transaction,
        # OK: tid1 is latest
        # Result: current tid is tid1, data_tid is None (undoing object
        # creation)
        # Explanation of transaction_object: oid1, no data but a data serial
        # to tid1
        self.assertEqual(
            db.findUndoTID(oid1, tid5, tid4, tid1,
                (u64(oid1), None, tid1)),
            (tid1, None, True))
        # Store a new transaction
        db.storeTransaction(
            tid3, (
                (oid1, None, tid1),
            ), None, temporary=False)
        # Undoing oid1 tid1, OK: tid3 is latest with tid1 data
        # Result: current tid is tid2, data_tid is None (undoing object
        # creation)
        self.assertEqual(
            db.findUndoTID(oid1, tid5, tid4, tid1, None),
            (tid3, None, True))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testStorageMySQL.py 0000664 0000000 0000000 00000006733 12601037530 0030240 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import MySQLdb
from mock import Mock
from neo.lib.exception import DatabaseFailure
from .testStorageDBTests import StorageDBTests
from neo.storage.database.mysqldb import MySQLDatabaseManager
NEO_SQL_DATABASE = 'test_mysqldb0'
NEO_SQL_USER = 'test'
class StorageMySQLdbTests(StorageDBTests):
    """Run the generic storage DB tests against the MySQL backend, plus
    MySQLdb-specific tests for query error handling and escaping.
    """
    # storage engine passed to the manager; None means the server default
    engine = None
    def getDB(self, reset=0):
        """Create a fresh MySQL test database and return its manager."""
        self.prepareDatabase(number=1, prefix=NEO_SQL_DATABASE[:-1])
        # db manager
        database = '%s@%s' % (NEO_SQL_USER, NEO_SQL_DATABASE)
        db = MySQLDatabaseManager(database, self.engine)
        self.assertEqual(db.db, NEO_SQL_DATABASE)
        self.assertEqual(db.user, NEO_SQL_USER)
        db.setup(reset)
        return db
    def test_query1(self):
        # fake result object: binary columns come back as array('b', ...)
        # and must be converted to plain strings by query()
        from array import array
        result_object = Mock({
            "num_rows": 1,
            "fetch_row": ((1, 2, array('b', (1, 2, ))), ),
        })
        # expected formatted result
        expected_result = (
            (1, 2, '\x01\x02', ),
        )
        self.db.conn = Mock({ 'store_result': result_object })
        result = self.db.query('SELECT ')
        self.assertEqual(result, expected_result)
        calls = self.db.conn.mockGetNamedCalls('query')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs('SELECT ')
    def test_query2(self):
        # test the OperationalError exception
        # fake object, raise exception during the first call
        from MySQLdb import OperationalError
        from MySQLdb.constants.CR import SERVER_GONE_ERROR
        class FakeConn(object):
            def query(*args):
                raise OperationalError(SERVER_GONE_ERROR, 'this is a test')
        self.db.conn = FakeConn()
        self.connect_called = False
        def connect_hook():
            # mock object, break raise/connect loop
            self.db.conn = Mock()
            self.connect_called = True
        self.db._connect = connect_hook
        # make a query, exception will be raised then connect() will be
        # called and the second query will use the mock object
        self.db.query('INSERT')
        self.assertTrue(self.connect_called)
    def test_query3(self):
        # OperationalError that is not "server gone away" must be turned
        # into a DatabaseFailure exception
        from MySQLdb import OperationalError
        class FakeConn(object):
            def close(self):
                pass
            def query(*args):
                raise OperationalError(-1, 'this is a test')
        self.db.conn = FakeConn()
        self.assertRaises(DatabaseFailure, self.db.query, 'QUERY')
    def test_escape(self):
        # double and single quotes must be backslash-escaped
        self.assertEqual(self.db.escape('a"b'), 'a\\"b')
        self.assertEqual(self.db.escape("a'b"), "a\\'b")
class StorageMySQLdbTokuDBTests(StorageMySQLdbTests):
    # Re-run the whole MySQL test suite against the TokuDB storage engine.
    engine = "TokuDB"
# Hide the abstract base class from test discovery so its tests only run
# through the concrete subclasses above.
del StorageDBTests
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testStorageSQLite.py 0000664 0000000 0000000 00000002010 12601037530 0030414 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import unittest
from .testStorageDBTests import StorageDBTests
from neo.storage.database.sqlite import SQLiteDatabaseManager
class StorageSQLiteTests(StorageDBTests):
    """Run the generic storage database tests against the SQLite backend."""

    def getDB(self, reset=0):
        """Return a ready-to-use in-memory SQLite database manager."""
        manager = SQLiteDatabaseManager(':memory:')
        manager.setup(reset)
        return manager
# Hide the abstract base class from test discovery so its tests only run
# through the concrete subclass above.
del StorageDBTests
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testTransactions.py 0000664 0000000 0000000 00000042362 12601037530 0030414 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2010-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import random
import unittest
from mock import Mock, ReturnValues
from .. import NeoUnitTestBase
from neo.storage.transactions import Transaction, TransactionManager
from neo.storage.transactions import ConflictError, DelayedError
class TransactionTests(NeoUnitTestBase):
    """Unit tests for the storage node's Transaction object."""

    def testInit(self):
        """A new transaction knows its client and TTID but has no TID yet."""
        client_uuid = self.getClientUUID()
        ttid = self.getNextTID()
        tid = self.getNextTID()
        txn = Transaction(client_uuid, ttid)
        self.assertEqual(txn.getUUID(), client_uuid)
        self.assertEqual(txn.getTTID(), ttid)
        # The final TID is only known once set explicitly.
        self.assertEqual(txn.getTID(), None)
        txn.setTID(tid)
        self.assertEqual(txn.getTID(), tid)
        # No object has been stored yet.
        self.assertEqual(txn.getObjectList(), [])
        self.assertEqual(txn.getOIDList(), [])

    def testLock(self):
        """lock() is one-shot: a second call must fail."""
        txn = Transaction(self.getClientUUID(), self.getNextTID())
        self.assertFalse(txn.isLocked())
        txn.lock()
        self.assertTrue(txn.isLocked())
        # Locking twice is a programming error.
        self.assertRaises(AssertionError, txn.lock)

    def testTransaction(self):
        """prepare() must record the metadata plus the transaction's TTID."""
        txn = Transaction(self.getClientUUID(), self.getNextTID())
        repr(txn)  # __repr__ must not raise
        oids = [self.getOID(1), self.getOID(2)]
        info = (oids, 'USER', 'DESC', 'EXT', False)
        txn.prepare(*info)
        self.assertEqual(txn.getTransactionInformations(),
            info + (txn.getTTID(),))

    def testObjects(self):
        """Stored objects and their OIDs are returned in insertion order."""
        txn = Transaction(self.getClientUUID(), self.getNextTID())
        first_oid, second_oid = self.getOID(1), self.getOID(2)
        first = first_oid, "0" * 20, None
        second = second_oid, "1" * 20, None
        self.assertEqual(txn.getObjectList(), [])
        self.assertEqual(txn.getOIDList(), [])
        txn.addObject(*first)
        self.assertEqual(txn.getObjectList(), [first])
        self.assertEqual(txn.getOIDList(), [first_oid])
        txn.addObject(*second)
        self.assertEqual(txn.getObjectList(), [first, second])
        self.assertEqual(txn.getOIDList(), [first_oid, second_oid])

    def test_getObject(self):
        """getObject() returns the stored tuple, or raises KeyError."""
        known_oid = self.getOID(1)
        unknown_oid = self.getOID(2)
        txn = Transaction(self.getClientUUID(), self.getNextTID())
        stored = known_oid, None, None
        txn.addObject(*stored)
        self.assertRaises(KeyError, txn.getObject, unknown_oid)
        self.assertEqual(txn.getObject(known_oid), stored)
class TransactionManagerTests(NeoUnitTestBase):
    # Tests for TransactionManager: registration, object stores, locking,
    # conflict/delay detection, abort and pack-related data updates.

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.app = Mock()
        # no history
        self.app.dm = Mock({'getObjectHistory': []})
        self.app.pt = Mock({'isAssigned': True})
        self.manager = TransactionManager(self.app)
        self.ltid = None

    def _getTransaction(self):
        # Fresh TID plus transaction metadata. Note: the OID list is always
        # [OID(1), OID(2)], so distinct transactions share the same OIDs.
        tid = self.getNextTID(self.ltid)
        oid_list = [self.getOID(1), self.getOID(2)]
        return (tid, (oid_list, 'USER', 'DESC', 'EXT', False))

    def _storeTransactionObjects(self, tid, txn):
        # Store one dummy object per OID of the given transaction metadata.
        for i, oid in enumerate(txn[0]):
            self.manager.storeObject(tid, None,
                oid, 1, '%020d' % i, '0' + str(i), None)

    def _getObject(self, value):
        # Return (serial, object tuple) derived deterministically from value.
        oid = self.getOID(value)
        serial = self.getNextTID()
        return (serial, (oid, 1, '%020d' % value, 'O' + str(value), None))

    def _checkTransactionStored(self, *args):
        # Exactly one storeTransaction call with the given arguments.
        calls = self.app.dm.mockGetNamedCalls('storeTransaction')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(*args)

    def _checkTransactionFinished(self, tid):
        # Exactly one finishTransaction call for the given TID.
        calls = self.app.dm.mockGetNamedCalls('finishTransaction')
        self.assertEqual(len(calls), 1)
        calls[0].checkArgs(tid)

    def _checkQueuedEventExecuted(self, number=1):
        # executeQueuedEvents must have been called `number` times.
        calls = self.app.mockGetNamedCalls('executeQueuedEvents')
        self.assertEqual(len(calls), number)

    def testSimpleCase(self):
        """ One node, one transaction, not abort """
        data_id_list = random.random(), random.random()
        self.app.dm.mockAddReturnValues(holdData=ReturnValues(*data_id_list))
        uuid = self.getClientUUID()
        ttid = self.getNextTID()
        tid, txn = self._getTransaction()
        serial1, object1 = self._getObject(1)
        serial2, object2 = self._getObject(2)
        self.manager.register(uuid, ttid)
        self.manager.storeTransaction(ttid, *txn)
        self.manager.storeObject(ttid, serial1, *object1)
        self.manager.storeObject(ttid, serial2, *object2)
        self.assertTrue(ttid in self.manager)
        self.manager.lock(ttid, tid, txn[0])
        # lock() must persist the transaction with the data ids returned
        # by holdData, in store order.
        self._checkTransactionStored(tid, [
            (object1[0], data_id_list[0], object1[4]),
            (object2[0], data_id_list[1], object2[4]),
        ], txn + (ttid,))
        self.manager.unlock(ttid)
        self.assertFalse(ttid in self.manager)
        self._checkTransactionFinished(tid)

    def testDelayed(self):
        """ Two transactions, the first cause the second to be delayed """
        uuid = self.getClientUUID()
        ttid1 = self.getNextTID()
        ttid2 = self.getNextTID()
        tid1, txn1 = self._getTransaction()
        tid2, txn2 = self._getTransaction()
        serial, obj = self._getObject(1)
        # first transaction lock the object
        self.manager.register(uuid, ttid1)
        self.manager.storeTransaction(ttid1, *txn1)
        self.assertTrue(ttid1 in self.manager)
        self._storeTransactionObjects(ttid1, txn1)
        self.manager.lock(ttid1, tid1, txn1[0])
        # the second is delayed
        self.manager.register(uuid, ttid2)
        self.manager.storeTransaction(ttid2, *txn2)
        self.assertTrue(ttid2 in self.manager)
        self.assertRaises(DelayedError, self.manager.storeObject,
            ttid2, serial, *obj)

    def testUnresolvableConflict(self):
        """ A newer transaction has already modified an object """
        uuid = self.getClientUUID()
        ttid1 = self.getNextTID()
        ttid2 = self.getNextTID()
        tid1, txn1 = self._getTransaction()
        tid2, txn2 = self._getTransaction()
        serial, obj = self._getObject(1)
        # the (later) transaction lock (change) the object
        self.manager.register(uuid, ttid2)
        self.manager.storeTransaction(ttid2, *txn2)
        self.assertTrue(ttid2 in self.manager)
        self._storeTransactionObjects(ttid2, txn2)
        self.manager.lock(ttid2, tid2, txn2[0])
        # the previous it's not using the latest version
        self.manager.register(uuid, ttid1)
        self.manager.storeTransaction(ttid1, *txn1)
        self.assertTrue(ttid1 in self.manager)
        self.assertRaises(ConflictError, self.manager.storeObject,
            ttid1, serial, *obj)

    def testResolvableConflict(self):
        """ Try to store an object with the lastest revision """
        uuid = self.getClientUUID()
        tid, txn = self._getTransaction()
        serial, obj = self._getObject(1)
        next_serial = self.getNextTID(serial)
        # try to store without the last revision
        self.app.dm = Mock({'getLastObjectTID': next_serial})
        self.manager.register(uuid, tid)
        self.manager.storeTransaction(tid, *txn)
        self.assertRaises(ConflictError, self.manager.storeObject,
            tid, serial, *obj)

    def testLockDelayed(self):
        """ Check lock delay """
        uuid1 = self.getClientUUID()
        uuid2 = self.getClientUUID()
        self.assertNotEqual(uuid1, uuid2)
        ttid1 = self.getNextTID()
        ttid2 = self.getNextTID()
        tid1, txn1 = self._getTransaction()
        tid2, txn2 = self._getTransaction()
        serial1, obj1 = self._getObject(1)
        serial2, obj2 = self._getObject(2)
        # first transaction lock objects
        self.manager.register(uuid1, ttid1)
        self.manager.storeTransaction(ttid1, *txn1)
        self.assertTrue(ttid1 in self.manager)
        self.manager.storeObject(ttid1, serial1, *obj1)
        # NOTE(review): serial1 is reused here for obj2 — looks like a
        # copy-paste of the previous line; serial2 was probably intended.
        self.manager.storeObject(ttid1, serial1, *obj2)
        self.manager.lock(ttid1, tid1, txn1[0])
        # second transaction is delayed
        self.manager.register(uuid2, ttid2)
        self.manager.storeTransaction(ttid2, *txn2)
        self.assertTrue(ttid2 in self.manager)
        self.assertRaises(DelayedError, self.manager.storeObject,
            ttid2, serial1, *obj1)
        self.assertRaises(DelayedError, self.manager.storeObject,
            ttid2, serial2, *obj2)

    def testLockConflict(self):
        """ Check lock conflict """
        uuid1 = self.getClientUUID()
        uuid2 = self.getClientUUID()
        self.assertNotEqual(uuid1, uuid2)
        ttid1 = self.getNextTID()
        ttid2 = self.getNextTID()
        tid1, txn1 = self._getTransaction()
        tid2, txn2 = self._getTransaction()
        serial1, obj1 = self._getObject(1)
        serial2, obj2 = self._getObject(2)
        # the second transaction lock objects
        self.manager.register(uuid2, ttid2)
        self.manager.storeTransaction(ttid2, *txn2)
        self.manager.storeObject(ttid2, serial1, *obj1)
        self.manager.storeObject(ttid2, serial2, *obj2)
        self.assertTrue(ttid2 in self.manager)
        # NOTE(review): txn1[0] is passed although ttid2 stored txn2 —
        # harmless only because _getTransaction always returns the same
        # OID list; txn2[0] was probably intended.
        self.manager.lock(ttid2, tid2, txn1[0])
        # the first get a conflict
        self.manager.register(uuid1, ttid1)
        self.manager.storeTransaction(ttid1, *txn1)
        self.assertTrue(ttid1 in self.manager)
        self.assertRaises(ConflictError, self.manager.storeObject,
            ttid1, serial1, *obj1)
        self.assertRaises(ConflictError, self.manager.storeObject,
            ttid1, serial2, *obj2)

    def testAbortUnlocked(self):
        """ Abort a non-locked transaction """
        uuid = self.getClientUUID()
        tid, txn = self._getTransaction()
        serial, obj = self._getObject(1)
        self.manager.register(uuid, tid)
        self.manager.storeTransaction(tid, *txn)
        self.manager.storeObject(tid, serial, *obj)
        self.assertTrue(tid in self.manager)
        # transaction is not locked
        self.manager.abort(tid)
        self.assertFalse(tid in self.manager)
        self.assertFalse(self.manager.loadLocked(obj[0]))
        self._checkQueuedEventExecuted()

    def testAbortLockedDoNothing(self):
        """ Try to abort a locked transaction """
        uuid = self.getClientUUID()
        ttid = self.getNextTID()
        tid, txn = self._getTransaction()
        self.manager.register(uuid, ttid)
        self.manager.storeTransaction(ttid, *txn)
        self._storeTransactionObjects(ttid, txn)
        # lock transaction
        self.manager.lock(ttid, tid, txn[0])
        self.assertTrue(ttid in self.manager)
        self.manager.abort(ttid)
        # a locked transaction must survive a plain abort()
        self.assertTrue(ttid in self.manager)
        for oid in txn[0]:
            self.assertTrue(self.manager.loadLocked(oid))
        self._checkQueuedEventExecuted(number=0)

    def testAbortForNode(self):
        """ Abort transaction for a node """
        uuid1 = self.getClientUUID()
        uuid2 = self.getClientUUID()
        self.assertNotEqual(uuid1, uuid2)
        ttid1 = self.getNextTID()
        ttid2 = self.getNextTID()
        ttid3 = self.getNextTID()
        tid1, txn1 = self._getTransaction()
        tid2, txn2 = self._getTransaction()
        tid3, txn3 = self._getTransaction()
        self.manager.register(uuid1, ttid1)
        self.manager.register(uuid2, ttid2)
        self.manager.register(uuid2, ttid3)
        self.manager.storeTransaction(ttid1, *txn1)
        # node 2 owns tid2 & tid3 and lock tid2 only
        self.manager.storeTransaction(ttid2, *txn2)
        self.manager.storeTransaction(ttid3, *txn3)
        self._storeTransactionObjects(ttid2, txn2)
        self.manager.lock(ttid2, tid2, txn2[0])
        self.assertTrue(ttid1 in self.manager)
        self.assertTrue(ttid2 in self.manager)
        self.assertTrue(ttid3 in self.manager)
        self.manager.abortFor(uuid2)
        # only tid3 is aborted: tid1 belongs to another node and tid2
        # is locked
        self.assertTrue(ttid1 in self.manager)
        self.assertTrue(ttid2 in self.manager)
        self.assertFalse(ttid3 in self.manager)
        self._checkQueuedEventExecuted(number=1)

    def testReset(self):
        """ Reset the manager """
        uuid = self.getClientUUID()
        tid, txn = self._getTransaction()
        ttid = self.getNextTID()
        self.manager.register(uuid, ttid)
        self.manager.storeTransaction(ttid, *txn)
        self._storeTransactionObjects(ttid, txn)
        self.manager.lock(ttid, tid, txn[0])
        self.assertTrue(ttid in self.manager)
        self.manager.reset()
        # reset() drops even a locked transaction and releases its locks
        self.assertFalse(ttid in self.manager)
        for oid in txn[0]:
            self.assertFalse(self.manager.loadLocked(oid))

    def test_getObjectFromTransaction(self):
        # getObjectFromTransaction returns the stored tuple only for the
        # matching (ttid, oid) pair, else None.
        data_id = random.random()
        self.app.dm.mockAddReturnValues(holdData=ReturnValues(data_id))
        uuid = self.getClientUUID()
        tid1, txn1 = self._getTransaction()
        tid2, txn2 = self._getTransaction()
        serial1, obj1 = self._getObject(1)
        serial2, obj2 = self._getObject(2)
        self.manager.register(uuid, tid1)
        self.manager.storeObject(tid1, serial1, *obj1)
        # wrong transaction or wrong oid -> None
        self.assertEqual(self.manager.getObjectFromTransaction(tid2, obj1[0]),
            None)
        self.assertEqual(self.manager.getObjectFromTransaction(tid1, obj2[0]),
            None)
        self.assertEqual(self.manager.getObjectFromTransaction(tid1, obj1[0]),
            (obj1[0], data_id, obj1[4]))

    def test_getLockingTID(self):
        uuid = self.getClientUUID()
        serial1, obj1 = self._getObject(1)
        oid1 = obj1[0]
        tid1, txn1 = self._getTransaction()
        self.assertEqual(self.manager.getLockingTID(oid1), None)
        self.manager.register(uuid, tid1)
        self.manager.storeObject(tid1, serial1, *obj1)
        # storing the object takes the write lock on its oid
        self.assertEqual(self.manager.getLockingTID(oid1), tid1)

    def test_updateObjectDataForPack(self):
        ram_serial = self.getNextTID()
        oid = self.getOID(1)
        orig_serial = self.getNextTID()
        uuid = self.getClientUUID()
        locking_serial = self.getNextTID()
        other_serial = self.getNextTID()
        new_serial = self.getNextTID()
        checksum = "2" * 20
        self.manager.register(uuid, locking_serial)
        # Object not known, nothing happens
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), None)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), None)
        self.manager.abort(locking_serial, even_if_locked=True)
        # Object known, but doesn't point at orig_serial, it is not updated
        self.manager.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, 0, "3" * 20,
            'bar', None)
        holdData = self.app.dm.mockGetNamedCalls('holdData')
        self.assertEqual(holdData.pop(0).params, ("3" * 20, 'bar', 0))
        orig_object = self.manager.getObjectFromTransaction(locking_serial,
            oid)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), orig_object)
        self.manager.abort(locking_serial, even_if_locked=True)
        # object pointing at another serial: also left untouched
        self.manager.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, None, None,
            None, other_serial)
        orig_object = self.manager.getObjectFromTransaction(locking_serial,
            oid)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), orig_object)
        self.manager.abort(locking_serial, even_if_locked=True)
        # Object known and points at undone data it gets updated
        self.manager.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, None, None,
            None, orig_serial)
        self.manager.updateObjectDataForPack(oid, orig_serial, new_serial,
            checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), (oid, None, new_serial))
        self.manager.abort(locking_serial, even_if_locked=True)
        # no new serial: the data is re-held under the pack checksum
        self.manager.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, None, None,
            None, orig_serial)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(holdData.pop(0).params, (checksum,))
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), (oid, checksum, None))
        self.manager.abort(locking_serial, even_if_locked=True)
        self.assertFalse(holdData)
# Allow running this test module standalone.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/storage/testVerificationHandler.py 0000664 0000000 0000000 00000017330 12601037530 0031661 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import unittest
from mock import Mock
from .. import NeoUnitTestBase
from neo.lib.pt import PartitionTable
from neo.storage.app import Application
from neo.storage.handlers.verification import VerificationHandler
from neo.lib.protocol import CellStates, ErrorCodes
from neo.lib.exception import PrimaryFailure
from neo.lib.util import p64, u64
class StorageVerificationHandlerTests(NeoUnitTestBase):
    # Tests for the storage node's VerificationHandler, using a real
    # Application instance and mock master connections.

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.prepareDatabase(number=1)
        # create an application object
        config = self.getStorageConfiguration(master_number=1)
        self.app = Application(config)
        self.verification = VerificationHandler(self.app)
        # define some variable to simulate client and storage node
        self.master_port = 10010
        self.storage_port = 10020
        self.client_port = 11011
        self.num_partitions = 1009
        self.num_replicas = 2
        self.app.operational = False
        self.app.load_lock_dict = {}
        self.app.pt = PartitionTable(self.num_partitions, self.num_replicas)

    def _tearDown(self, success):
        self.app.close()
        del self.app
        super(StorageVerificationHandlerTests, self)._tearDown(success)

    # Common methods
    def getMasterConnection(self):
        # Fake connection coming from the primary master.
        return self.getFakeConnection(address=("127.0.0.1", self.master_port))

    # Tests
    def test_03_connectionClosed(self):
        conn = self.getMasterConnection()
        self.app.listening_conn = object() # mark as running
        # losing the master connection is fatal during verification
        self.assertRaises(PrimaryFailure, self.verification.connectionClosed, conn,)
        # nothing happens
        self.checkNoPacketSent(conn)

    def test_08_askPartitionTable(self):
        node = self.app.nm.createStorage(
            address=("127.7.9.9", 1),
            uuid=self.getStorageUUID()
        )
        self.app.pt.setCell(1, node, CellStates.UP_TO_DATE)
        self.assertTrue(self.app.pt.hasOffset(1))
        conn = self.getMasterConnection()
        self.verification.askPartitionTable(conn)
        ptid, row_list = self.checkAnswerPartitionTable(conn, decode=True)
        # one row per partition (num_partitions == 1009, see setUp)
        self.assertEqual(len(row_list), 1009)

    def test_10_notifyPartitionChanges(self):
        # old partition change
        conn = self.getMasterConnection()
        self.verification.notifyPartitionChanges(conn, 1, ())
        self.verification.notifyPartitionChanges(conn, 0, ())
        # an outdated ptid (0) must not overwrite a newer one (1)
        self.assertEqual(self.app.pt.getID(), 1)
        # new node
        conn = self.getMasterConnection()
        new_uuid = self.getStorageUUID()
        cell = (0, new_uuid, CellStates.UP_TO_DATE)
        self.app.nm.createStorage(uuid=new_uuid)
        self.app.pt = PartitionTable(1, 1)
        self.app.dm = Mock({ })
        ptid = self.getPTID()
        # pt updated
        self.verification.notifyPartitionChanges(conn, ptid, (cell, ))
        # check db update
        calls = self.app.dm.mockGetNamedCalls('changePartitionTable')
        self.assertEqual(len(calls), 1)
        self.assertEqual(calls[0].getParam(0), ptid)
        self.assertEqual(calls[0].getParam(1), (cell, ))

    def test_13_askUnfinishedTransactions(self):
        # client connection with no data
        self.app.dm = Mock({
            'getUnfinishedTIDList': [],
        })
        conn = self.getMasterConnection()
        self.verification.askUnfinishedTransactions(conn)
        (max_tid, tid_list) = self.checkAnswerUnfinishedTransactions(conn, decode=True)
        self.assertEqual(len(tid_list), 0)
        call_list = self.app.dm.mockGetNamedCalls('getUnfinishedTIDList')
        self.assertEqual(len(call_list), 1)
        call_list[0].checkArgs()
        # client connection with some data
        self.app.dm = Mock({
            'getUnfinishedTIDList': [p64(4)],
        })
        conn = self.getMasterConnection()
        self.verification.askUnfinishedTransactions(conn)
        (max_tid, tid_list) = self.checkAnswerUnfinishedTransactions(conn, decode=True)
        self.assertEqual(len(tid_list), 1)
        self.assertEqual(u64(tid_list[0]), 4)

    def test_14_askTransactionInformation(self):
        # ask from client conn with no data
        self.app.dm = Mock({
            'getTransaction': None,
        })
        conn = self.getMasterConnection()
        tid = p64(1)
        self.verification.askTransactionInformation(conn, tid)
        # unknown tid -> TID_NOT_FOUND error packet
        code, message = self.checkErrorPacket(conn, decode=True)
        self.assertEqual(code, ErrorCodes.TID_NOT_FOUND)
        call_list = self.app.dm.mockGetNamedCalls('getTransaction')
        self.assertEqual(len(call_list), 1)
        call_list[0].checkArgs(tid, all=True)
        # input some tmp data and ask from client, must find both transaction
        self.app.dm = Mock({
            'getTransaction': ([p64(2)], 'u2', 'd2', 'e2', False),
        })
        conn = self.getMasterConnection()
        self.verification.askTransactionInformation(conn, p64(1))
        tid, user, desc, ext, packed, oid_list = self.checkAnswerTransactionInformation(conn, decode=True)
        self.assertEqual(u64(tid), 1)
        self.assertEqual(user, 'u2')
        self.assertEqual(desc, 'd2')
        self.assertEqual(ext, 'e2')
        self.assertFalse(packed)
        self.assertEqual(len(oid_list), 1)
        self.assertEqual(u64(oid_list[0]), 2)

    def test_15_askObjectPresent(self):
        # client connection with no data
        self.app.dm = Mock({
            'objectPresent': False,
        })
        conn = self.getMasterConnection()
        oid, tid = p64(1), p64(2)
        self.verification.askObjectPresent(conn, oid, tid)
        # unknown object -> OID_NOT_FOUND error packet
        code, message = self.checkErrorPacket(conn, decode=True)
        self.assertEqual(code, ErrorCodes.OID_NOT_FOUND)
        call_list = self.app.dm.mockGetNamedCalls('objectPresent')
        self.assertEqual(len(call_list), 1)
        call_list[0].checkArgs(oid, tid)
        # client connection with some data
        self.app.dm = Mock({
            'objectPresent': True,
        })
        conn = self.getMasterConnection()
        self.verification.askObjectPresent(conn, oid, tid)
        oid, tid = self.checkAnswerObjectPresent(conn, decode=True)
        self.assertEqual(u64(tid), 2)
        self.assertEqual(u64(oid), 1)

    def test_16_deleteTransaction(self):
        # client connection with no data
        self.app.dm = Mock({
            'deleteTransaction': None,
        })
        conn = self.getMasterConnection()
        oid_list = [self.getOID(1), self.getOID(2)]
        tid = p64(1)
        self.verification.deleteTransaction(conn, tid, oid_list)
        # the request must be forwarded verbatim to the database manager
        call_list = self.app.dm.mockGetNamedCalls('deleteTransaction')
        self.assertEqual(len(call_list), 1)
        call_list[0].checkArgs(tid, oid_list)

    def test_17_commitTransaction(self):
        # commit a transaction
        conn = self.getMasterConnection()
        dm = Mock()
        self.app.dm = dm
        self.verification.commitTransaction(conn, p64(1))
        self.assertEqual(len(dm.mockGetNamedCalls("finishTransaction")), 1)
        call = dm.mockGetNamedCalls("finishTransaction")[0]
        tid = call.getParam(0)
        self.assertEqual(u64(tid), 1)
# Allow running this test module standalone.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testBootstrap.py 0000664 0000000 0000000 00000004344 12601037530 0026253 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import unittest
from . import NeoUnitTestBase
from neo.storage.app import Application
from neo.lib.bootstrap import BootstrapManager
from neo.lib.protocol import NodeTypes
class BootstrapManagerTests(NeoUnitTestBase):
    """Tests for BootstrapManager driving a storage node's startup."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.prepareDatabase(number=1)
        # Build a storage application and its bootstrap manager.
        self.app = Application(self.getStorageConfiguration())
        self.bootstrap = BootstrapManager(self.app, 'main', NodeTypes.STORAGE)
        # Simulated network/cluster parameters.
        self.master_port = 10010
        self.storage_port = 10020
        self.num_partitions = 1009
        self.num_replicas = 2

    def _tearDown(self, success):
        self.app.close()
        del self.app
        super(BootstrapManagerTests, self)._tearDown(success)

    # Tests

    def testConnectionCompleted(self):
        """Completing a connection must send an identification request."""
        master_address = ("127.0.0.1", self.master_port)
        connection = self.getFakeConnection(address=master_address)
        self.bootstrap.current = self.app.nm.createMaster(
            address=master_address)
        self.bootstrap.connectionCompleted(connection)
        self.checkRequestIdentification(connection)

    def testHandleNotReady(self):
        """When the primary is not ready, close without sending anything."""
        master_address = ("127.0.0.1", self.master_port)
        connection = self.getFakeConnection(address=master_address)
        self.bootstrap.current = self.app.nm.createMaster(
            address=master_address)
        self.bootstrap.notReady(connection, '')
        self.checkClosed(connection)
        self.checkNoPacketSent(connection)
# Allow running this test module standalone.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testConnection.py 0000664 0000000 0000000 00000107133 12601037530 0026375 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import unittest
from time import time
from mock import Mock
from neo.lib import connection, logging
from neo.lib.connection import BaseConnection, ListeningConnection, \
Connection, ClientConnection, ServerConnection, MTClientConnection, \
HandlerSwitcher, CRITICAL_TIMEOUT
from neo.lib.connector import registerConnectorHandler
from neo.lib.connector import ConnectorException, ConnectorTryAgainException, \
ConnectorInProgressException, ConnectorConnectionRefusedException
from neo.lib.handler import EventHandler
from neo.lib.protocol import Packets, PACKET_HEADER_FORMAT
from . import NeoUnitTestBase, Patch
# Module-level counter handing out a unique descriptor per DummyConnector.
connector_cpt = 0
class DummyConnector(Mock):
    # Minimal fake connector: records its address and takes a unique
    # descriptor from the module-level counter; all network operations
    # are no-ops.
    def __init__(self, addr, s=None):
        logging.info("initializing connector")
        global connector_cpt
        self.desc = connector_cpt
        connector_cpt += 1
        self.packet_cpt = 0
        self.addr = addr
        # NOTE(review): Mock.__init__ is deliberately called after the
        # attribute assignments above — presumably so they end up as plain
        # instance attributes; confirm against the mock library's behavior.
        Mock.__init__(self)
    def getAddress(self):
        return self.addr
    def getDescriptor(self):
        return self.desc
    # Socket-level operations do nothing in these tests.
    accept = getError = makeClientConnection = makeListeningConnection = \
        receive = send = lambda *args, **kw: None
# Patch object making every connection created while it is active use a
# DummyConnector instead of a real socket connector.
dummy_connector = Patch(BaseConnection,
    ConnectorClass=lambda orig, self, *args, **kw: DummyConnector(*args, **kw))
class ConnectionTests(NeoUnitTestBase):
def setUp(self):
NeoUnitTestBase.setUp(self)
self.app = Mock({'__repr__': 'Fake App'})
self.em = Mock({'__repr__': 'Fake Em'})
self.handler = Mock({'__repr__': 'Fake Handler'})
self.address = ("127.0.0.7", 93413)
self.node = Mock({'getAddress': self.address})
connection.connect_limit = 0
def _makeListeningConnection(self, addr):
with dummy_connector:
conn = ListeningConnection(self.em, self.handler, addr)
self.connector = conn.connector
return conn
def _makeServerConnection(self):
addr = self.address
self.connector = DummyConnector(addr)
return Connection(self.em, self.handler, self.connector, addr)
def _makeClientConnection(self):
with dummy_connector:
conn = ClientConnection(self.em, self.handler, self.node)
self.connector = conn.connector
return conn
_makeConnection = _makeClientConnection
def _checkRegistered(self, n=1):
self.assertEqual(len(self.em.mockGetNamedCalls("register")), n)
def _checkUnregistered(self, n=1):
self.assertEqual(len(self.em.mockGetNamedCalls("unregister")), n)
def _checkReaderRemoved(self, n=1):
self.assertEqual(len(self.em.mockGetNamedCalls("removeReader")), n)
def _checkWriterAdded(self, n=1):
self.assertEqual(len(self.em.mockGetNamedCalls("addWriter")), n)
def _checkWriterRemoved(self, n=1):
self.assertEqual(len(self.em.mockGetNamedCalls("removeWriter")), n)
def _checkClose(self, n=1):
self.assertEqual(len(self.connector.mockGetNamedCalls("close")), n)
def _checkAccept(self, n=1):
calls = self.connector.mockGetNamedCalls('accept')
self.assertEqual(len(calls), n)
def _checkSend(self, n=1, data=None):
calls = self.connector.mockGetNamedCalls('send')
self.assertEqual(len(calls), n)
if n > 1 and data is not None:
data = calls[n-1].getParam(0)
self.assertEqual(data, "testdata")
def _checkConnectionAccepted(self, n=1):
calls = self.handler.mockGetNamedCalls('connectionAccepted')
self.assertEqual(len(calls), n)
def _checkConnectionFailed(self, n=1):
calls = self.handler.mockGetNamedCalls('connectionFailed')
self.assertEqual(len(calls), n)
def _checkConnectionClosed(self, n=1):
calls = self.handler.mockGetNamedCalls('connectionClosed')
self.assertEqual(len(calls), n)
def _checkConnectionStarted(self, n=1):
calls = self.handler.mockGetNamedCalls('connectionStarted')
self.assertEqual(len(calls), n)
def _checkConnectionCompleted(self, n=1):
calls = self.handler.mockGetNamedCalls('connectionCompleted')
self.assertEqual(len(calls), n)
def _checkMakeListeningConnection(self, n=1):
calls = self.connector.mockGetNamedCalls('makeListeningConnection')
self.assertEqual(len(calls), n)
def _checkMakeClientConnection(self, n=1):
calls = self.connector.mockGetNamedCalls("makeClientConnection")
self.assertEqual(len(calls), n)
def _checkPacketReceived(self, n=1):
calls = self.handler.mockGetNamedCalls('packetReceived')
self.assertEqual(len(calls), n)
def _checkReadBuf(self, bc, data):
content = bc.read_buf.read(len(bc.read_buf))
self.assertEqual(''.join(content), data)
def _appendToReadBuf(self, bc, data):
bc.read_buf.append(data)
def _appendPacketToReadBuf(self, bc, packet):
data = ''.join(packet.encode())
bc.read_buf.append(data)
def _checkWriteBuf(self, bc, data):
self.assertEqual(''.join(bc.write_buf), data)
def test_01_BaseConnection(self):
# init with address
bc = self._makeConnection()
self.assertEqual(bc.getAddress(), self.address)
self.assertIsNot(bc.connector, None)
self._checkRegistered(1)
def test_02_ListeningConnection1(self):
# test init part
addr = ("127.0.0.7", 93413)
with Patch(DummyConnector, accept=lambda orig, self: (self, ('', 0))):
bc = self._makeListeningConnection(addr=addr)
self.assertEqual(bc.getAddress(), addr)
self._checkRegistered()
self._checkMakeListeningConnection()
# test readable
bc.readable()
self._checkAccept()
self._checkConnectionAccepted()
def test_02_ListeningConnection2(self):
# test with exception raise when getting new connection
def accept(orig, self):
raise ConnectorTryAgainException
addr = ("127.0.0.7", 93413)
with Patch(DummyConnector, accept=accept):
bc = self._makeListeningConnection(addr=addr)
self.assertEqual(bc.getAddress(), addr)
self._checkRegistered()
self._checkMakeListeningConnection()
# test readable
bc.readable()
self._checkAccept(1)
self._checkConnectionAccepted(0)
def test_03_Connection(self):
    # Initial state: empty buffers, id counter at 0, not aborted, no uuid.
    bc = self._makeConnection()
    self.assertEqual(bc.getAddress(), self.address)
    self._checkReadBuf(bc, '')
    self._checkWriteBuf(bc, '')
    self.assertEqual(bc.cur_id, 0)
    self.assertFalse(bc.aborted)
    # test uuid: unset by default, settable afterwards
    self.assertEqual(bc.uuid, None)
    self.assertEqual(bc.getUUID(), None)
    uuid = self.getNewUUID(None)
    bc.setUUID(uuid)
    self.assertEqual(bc.getUUID(), uuid)
    # test next id: returns current value then increments
    cur_id = bc.cur_id
    next_id = bc._getNextId()
    self.assertEqual(next_id, cur_id)
    next_id = bc._getNextId()
    self.assertTrue(next_id > cur_id)
    # test overflow of next id: ids are 32-bit and wrap back to 0
    bc.cur_id = 0xffffffff
    next_id = bc._getNextId()
    self.assertEqual(next_id, 0xffffffff)
    next_id = bc._getNextId()
    self.assertEqual(next_id, 0)
def test_Connection_pending(self):
    # pending() is true exactly when the write buffer is non-empty.
    bc = self._makeConnection()
    self.assertEqual(''.join(bc.write_buf), '')
    self.assertFalse(bc.pending())
    bc.write_buf += '1'
    self.assertTrue(bc.pending())
def test_Connection_recv1(self):
    # Received bytes are appended to the read buffer.
    # patch receive method to return data
    with Patch(DummyConnector, receive=lambda orig, self: "testdata"):
        bc = self._makeConnection()
        self._checkReadBuf(bc, '')
        bc._recv()
        self._checkReadBuf(bc, 'testdata')
def test_Connection_recv2(self):
    # A transient "try again" error during receive is swallowed:
    # nothing buffered, connection stays open and registered.
    def receive(orig, self):
        raise ConnectorTryAgainException
    with Patch(DummyConnector, receive=receive):
        bc = self._makeConnection()
        self._checkReadBuf(bc, '')
        bc._recv()
        self._checkReadBuf(bc, '')
        self._checkConnectionClosed(0)
        self._checkUnregistered(0)
def test_Connection_recv3(self):
    # A refused connection during receive fails the connection and
    # unregisters it.
    def receive(orig, self):
        raise ConnectorConnectionRefusedException
    with Patch(DummyConnector, receive=receive):
        bc = self._makeConnection()
        self._checkReadBuf(bc, '')
        # fake client connection instance with connecting attribute
        bc.connecting = True
        bc._recv()
        self._checkReadBuf(bc, '')
        self._checkConnectionFailed(1)
        self._checkUnregistered(1)
def test_Connection_recv4(self):
    # Any other connector error propagates to the caller, after closing
    # and unregistering the connection.
    def receive(orig, self):
        raise ConnectorException
    with Patch(DummyConnector, receive=receive):
        bc = self._makeConnection()
        self._checkReadBuf(bc, '')
        self.assertRaises(ConnectorException, bc._recv)
        self._checkReadBuf(bc, '')
        self._checkConnectionClosed(1)
        self._checkUnregistered(1)
def test_Connection_send1(self):
    # With an empty write buffer, _send() is a no-op: the connector's
    # send is never called and the connection stays open.
    bc = self._makeConnection()
    self._checkWriteBuf(bc, '')
    bc._send()
    self._checkSend(0)
    self._checkConnectionClosed(0)
    self._checkUnregistered(0)
def test_Connection_send2(self):
    # When the connector sends everything, the write buffer is drained.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        bc._send()
        self._checkSend(1, "testdata")
        self._checkWriteBuf(bc, '')
        self._checkConnectionClosed(0)
        self._checkUnregistered(0)
def test_Connection_send3(self):
    # Partial send: the unsent half stays in the write buffer.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)//2):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        bc._send()
        self._checkSend(1, "testdata")
        # 8 bytes, 4 sent -> "data" remains
        self._checkWriteBuf(bc, 'data')
        self._checkConnectionClosed(0)
        self._checkUnregistered(0)
def test_Connection_send4(self):
    # Multiple buffered chunks are concatenated into a single send call.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata", "second", "third"]
        bc._send()
        self._checkSend(1, "testdatasecondthird")
        self._checkWriteBuf(bc, '')
        self._checkConnectionClosed(0)
        self._checkUnregistered(0)
def test_Connection_send5(self):
    # Partial send of concatenated chunks: the unsent tail remains.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)//2):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata", "second", "third"]
        bc._send()
        self._checkSend(1, "testdatasecondthird")
        # 19 bytes, 9 sent -> last 10 remain
        self._checkWriteBuf(bc, 'econdthird')
        self._checkConnectionClosed(0)
        self._checkUnregistered(0)
def test_Connection_send6(self):
    # A transient "try again" error keeps all data buffered and the
    # connection open.
    def send(orig, self, data):
        raise ConnectorTryAgainException
    with Patch(DummyConnector, send=send):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata", "second", "third"]
        bc._send()
        self._checkSend(1, "testdatasecondthird")
        self._checkWriteBuf(bc, 'testdatasecondthird')
        self._checkConnectionClosed(0)
        self._checkUnregistered(0)
def test_Connection_send7(self):
    # Any other connector error propagates; the connection is closed,
    # which also flushes its buffers.
    def send(orig, self, data):
        raise ConnectorException
    with Patch(DummyConnector, send=send):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata", "second", "third"]
        self.assertRaises(ConnectorException, bc._send)
        self._checkSend(1, "testdatasecondthird")
        # connection closed -> buffers flushed
        self._checkWriteBuf(bc, '')
        self._checkConnectionClosed(1)
        self._checkUnregistered(1)
def test_07_Connection_addPacket(self):
    # _addPacket serializes the packet into the write buffer and
    # registers the connection for writing.
    p = Packets.Ping()
    p._id = 0
    bc = self._makeConnection()
    self._checkWriteBuf(bc, '')
    bc._addPacket(p)
    # header only: (id, code, total length=10) and an empty body
    self._checkWriteBuf(bc, PACKET_HEADER_FORMAT.pack(0, p._code, 10))
    self._checkWriterAdded(1)
def test_Connection_analyse1(self):
    # _analyse decodes a packet only once it is fully buffered.
    # nothing to read, nothing is done
    bc = self._makeConnection()
    bc._queue = Mock()
    self._checkReadBuf(bc, '')
    bc._analyse()
    self._checkPacketReceived(0)
    self._checkReadBuf(bc, '')
    p = Packets.AnswerPrimary(self.getNewUUID(None))
    p.setId(1)
    p_data = ''.join(p.encode())
    # split the serialized packet one byte before its end
    data_edge = len(p_data) - 1
    p_data_1, p_data_2 = p_data[:data_edge], p_data[data_edge:]
    # append an incomplete packet, nothing is done
    bc.read_buf.append(p_data_1)
    bc._analyse()
    self._checkPacketReceived(0)
    self.assertNotEqual(len(bc.read_buf), 0)
    self.assertNotEqual(len(bc.read_buf), len(p_data))
    # append the rest of the packet
    bc.read_buf.append(p_data_2)
    bc._analyse()
    # check packet decoded and queued with same type/id/payload
    self.assertEqual(len(bc._queue.mockGetNamedCalls("append")), 1)
    call = bc._queue.mockGetNamedCalls("append")[0]
    data = call.getParam(0)
    self.assertEqual(type(data), type(p))
    self.assertEqual(data.getId(), p.getId())
    self.assertEqual(data.decode(), p.decode())
    self._checkReadBuf(bc, '')
def test_Connection_analyse2(self):
    # Two packets buffered back to back are both decoded, in order.
    bc = self._makeConnection()
    bc._queue = Mock()
    p1 = Packets.AnswerPrimary(self.getNewUUID(None))
    p1.setId(1)
    self._appendPacketToReadBuf(bc, p1)
    p2 = Packets.AnswerPrimary( self.getNewUUID(None))
    p2.setId(2)
    self._appendPacketToReadBuf(bc, p2)
    self.assertEqual(len(bc.read_buf), len(p1) + len(p2))
    bc._analyse()
    # check two packets decoded
    self.assertEqual(len(bc._queue.mockGetNamedCalls("append")), 2)
    # packet 1
    call = bc._queue.mockGetNamedCalls("append")[0]
    data = call.getParam(0)
    self.assertEqual(type(data), type(p1))
    self.assertEqual(data.getId(), p1.getId())
    self.assertEqual(data.decode(), p1.decode())
    # packet 2
    call = bc._queue.mockGetNamedCalls("append")[1]
    data = call.getParam(0)
    self.assertEqual(type(data), type(p2))
    self.assertEqual(data.getId(), p2.getId())
    self.assertEqual(data.decode(), p2.decode())
    self._checkReadBuf(bc, '')
def test_Connection_analyse3(self):
    # Trailing garbage after a valid packet closes the connection.
    bc = self._makeConnection()
    p = Packets.Ping()
    p.setId(1)
    # valid ping followed by bytes that cannot form a packet header
    self._appendToReadBuf(bc, '%s%sdatadatadatadata' % p.encode())
    bc._analyse()
    self._checkPacketReceived(1) # ping packet
    self._checkClose(1) # malformed packet
def test_Connection_analyse4(self):
    # A complete, well-formed packet is decoded and queued.
    bc = self._makeConnection()
    bc._queue = Mock()
    p = Packets.AnswerPrimary(self.getNewUUID(None))
    p.setId(1)
    self._appendPacketToReadBuf(bc, p)
    bc._analyse()
    # check packet decoded
    self.assertEqual(len(bc._queue.mockGetNamedCalls("append")), 1)
    call = bc._queue.mockGetNamedCalls("append")[0]
    data = call.getParam(0)
    self.assertEqual(type(data), type(p))
    self.assertEqual(data.getId(), p.getId())
    self.assertEqual(data.decode(), p.decode())
    self._checkReadBuf(bc, '')
def test_Connection_writable1(self):
    # writable() with a partial send: data remains pending, so the
    # connection stays registered for writing.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)//2):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        self.assertTrue(bc.pending())
        self.assertFalse(bc.aborted)
        bc.writable()
        # test send was called
        self._checkSend(1, "testdata")
        self._checkWriteBuf(bc, "data")
        self._checkConnectionClosed(0)
        self._checkClose(0)
        self._checkUnregistered(0)
        # still pending, so the writer is kept and nothing is closed
        self.assertTrue(bc.pending())
        self._checkWriterRemoved(0)
        self._checkReaderRemoved(0)
        self._checkClose(0)
def test_Connection_writable2(self):
    # writable() with a complete send: buffer drained, writer removed,
    # connection kept open.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        self.assertTrue(bc.pending())
        self.assertFalse(bc.aborted)
        bc.writable()
        # test send was called
        self._checkSend(1, "testdata")
        self._checkWriteBuf(bc, '')
        self._checkConnectionClosed(0)
        self._checkClose(0)
        self._checkUnregistered(0)
        # nothing else pending, so writer has been removed
        self.assertFalse(bc.pending())
        self._checkWriterRemoved(1)
        self._checkReaderRemoved(0)
        self._checkClose(0)
def test_Connection_writable3(self):
    # writable() on an aborted connection: once the buffer is drained,
    # the connection is closed and unregistered.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)):
        bc = self._makeConnection()
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        self.assertTrue(bc.pending())
        bc.abort()
        self.assertTrue(bc.aborted)
        bc.writable()
        # test send was called
        self._checkSend(1, "testdata")
        self._checkWriteBuf(bc, '')
        self._checkConnectionClosed(1)
        self._checkClose(1)
        self._checkUnregistered(1)
        # nothing else pending, so writer has been removed
        self.assertFalse(bc.pending())
        self._checkClose(1)
def test_Connection_readable(self):
    # readable() receives, decodes and queues a full packet without
    # aborting or closing the connection.
    # patch receive method to return a serialized AnswerPrimary packet
    def receive(orig, self):
        p = Packets.AnswerPrimary(self.getNewUUID(None))
        p.setId(1)
        return ''.join(p.encode())
    with Patch(DummyConnector, receive=receive):
        bc = self._makeConnection()
        bc._queue = Mock({'__len__': 0})
        self._checkReadBuf(bc, '')
        self.assertFalse(bc.aborted)
        bc.readable()
        # check packet decoded
        self._checkReadBuf(bc, '')
        self.assertEqual(len(bc._queue.mockGetNamedCalls("append")), 1)
        call = bc._queue.mockGetNamedCalls("append")[0]
        data = call.getParam(0)
        self.assertEqual(type(data), Packets.AnswerPrimary)
        self.assertEqual(data.getId(), 1)
        self._checkReadBuf(bc, '')
        # check not aborted
        self.assertFalse(bc.aborted)
        self._checkUnregistered(0)
        self._checkWriterRemoved(0)
        self._checkReaderRemoved(0)
        self._checkClose(0)
def test_ClientConnection_init1(self):
    # A client connection that connects immediately completes during
    # construction: connectionStarted then connectionCompleted fire.
    bc = self._makeClientConnection()
    # check connector created and connection initialize
    self.assertFalse(bc.connecting)
    self.assertFalse(bc.isServer())
    self._checkMakeClientConnection(1)
    # check call to handler
    self.assertFalse(bc.getHandler() is None)
    self._checkConnectionStarted(1)
    self._checkConnectionCompleted(1)
    self._checkConnectionFailed(0)
    # check call to event manager
    self.assertIsNot(bc.em, None)
    self._checkWriterAdded(0)
def test_ClientConnection_init2(self):
    # An in-progress (non-blocking) connect leaves the connection in
    # "connecting" state; completion is deferred and a writer is added
    # so the connect can finish asynchronously.
    def makeClientConnection(orig, self):
        raise ConnectorInProgressException
    with Patch(DummyConnector, makeClientConnection=makeClientConnection):
        bc = self._makeClientConnection()
    # check connector created and connection initialize
    self.assertTrue(bc.connecting)
    self.assertFalse(bc.isServer())
    self._checkMakeClientConnection(1)
    # check call to handler
    self.assertFalse(bc.getHandler() is None)
    self._checkConnectionStarted(1)
    self._checkConnectionCompleted(0)
    self._checkConnectionFailed(0)
    # check call to event manager
    self.assertIsNot(bc.em, None)
    self._checkWriterAdded(1)
def test_ClientConnection_init3(self):
    # Any other connector error during connect propagates to the caller
    # and is reported as a connection failure.
    def makeClientConnection(orig, self):
        raise ConnectorException
    with Patch(DummyConnector, makeClientConnection=makeClientConnection):
        self.assertRaises(ConnectorException, self._makeClientConnection)
    # since the exception was raised, the connection is not created
    # check call to handler
    self._checkConnectionStarted(1)
    self._checkConnectionCompleted(0)
    self._checkConnectionFailed(1)
    # check call to event manager
    self._checkWriterAdded(0)
def test_ClientConnection_writable1(self):
    # A connected (non-connecting) client connection delegates
    # writable() to the base implementation: data is sent and the
    # writer is removed once the buffer is empty.
    with Patch(DummyConnector, send=lambda orig, self, data: len(data)), \
         Patch(DummyConnector,
               makeClientConnection=lambda orig, self: "OK") as p:
        bc = self._makeClientConnection()
        # the connect patch is only needed during construction
        p.revert()
        # check connector created and connection initialize
        self.assertFalse(bc.connecting)
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        self.assertTrue(bc.pending())
        self.assertFalse(bc.aborted)
        # call
        self._checkConnectionCompleted(1)
        bc.writable()
        self.assertFalse(bc.pending())
        self.assertFalse(bc.aborted)
        self.assertFalse(bc.connecting)
        self._checkSend(1, "testdata")
        self._checkConnectionClosed(0)
        self._checkConnectionCompleted(1)
        self._checkConnectionFailed(0)
        self._checkUnregistered(0)
        self._checkWriterRemoved(1)
        self._checkReaderRemoved(0)
        self._checkClose(0)
def test_ClientConnection_writable2(self):
    # writable() while still connecting and the socket reports an
    # error: the connection is closed instead of sending anything.
    with Patch(DummyConnector, getError=lambda orig, self: True):
        bc = self._makeClientConnection()
        # check connector created and connection initialize
        self._checkWriteBuf(bc, '')
        bc.write_buf = ["testdata"]
        self.assertTrue(bc.pending())
        self.assertFalse(bc.aborted)
        # call
        self._checkConnectionCompleted(1)
        bc.writable()
        self.assertFalse(bc.connecting)
        self.assertFalse(bc.pending())
        self.assertFalse(bc.aborted)
        # buffers flushed by the close, no completion reported twice
        self._checkWriteBuf(bc, '')
        self._checkConnectionClosed(1)
        self._checkConnectionCompleted(1)
        self._checkConnectionFailed(0)
        self._checkUnregistered(1)
def test_14_ServerConnection(self):
    # A server-side connection exposes the same basic state machine as
    # a plain connection: empty buffers, 32-bit wrapping id counter,
    # settable uuid.
    bc = self._makeServerConnection()
    self.assertEqual(bc.getAddress(), ("127.0.0.7", 93413))
    self._checkReadBuf(bc, '')
    self._checkWriteBuf(bc, '')
    self.assertEqual(bc.cur_id, 0)
    self.assertFalse(bc.aborted)
    # test uuid
    self.assertEqual(bc.uuid, None)
    self.assertEqual(bc.getUUID(), None)
    uuid = self.getNewUUID(None)
    bc.setUUID(uuid)
    self.assertEqual(bc.getUUID(), uuid)
    # test next id
    cur_id = bc.cur_id
    next_id = bc._getNextId()
    self.assertEqual(next_id, cur_id)
    next_id = bc._getNextId()
    self.assertTrue(next_id > cur_id)
    # test overflow of next id
    bc.cur_id = 0xffffffff
    next_id = bc._getNextId()
    self.assertEqual(next_id, 0xffffffff)
    next_id = bc._getNextId()
    self.assertEqual(next_id, 0)
def test_15_Timeout(self):
    # Scenario-driven check of the request-timeout logic, with a faked
    # monotonic clock (connection.time is patched, restored in finally).
    # NOTE: This method uses ping/pong packets only because MT connection
    # don't accept any other packet without specifying a queue.
    self.handler = EventHandler(self.app)
    conn = self._makeClientConnection()
    # Each use case is (events, expected):
    #  - events: alternating (time, answered_packet_id_or_None); None
    #    means "send another Ping" instead of answering.
    #  - expected: alternating (timeout_time, packet_id_to_answer).
    use_case_list = (
        # (a) For a single packet sent at T,
        #     the limit time for the answer is T + (1 * CRITICAL_TIMEOUT)
        ((), (1., 0)),
        # (b) Same as (a), even if send another packet at (T + CT/2).
        #     But receiving a packet (at T + CT - ε) resets the timeout
        #     (which means the limit for the 2nd one is T + 2*CT)
        ((.5, None), (1., 0, 2., 1)),
        # (c) Same as (b) with a first answer at well before the limit
        #     (T' = T + CT/2). The limit for the second one is T' + CT.
        ((.1, None, .5, 1), (1.5, 0)),
    )
    from neo.lib import connection
    def set_time(t):
        # Fake clock: t is expressed in units of CRITICAL_TIMEOUT,
        # offset by 1000 to stay away from 0.
        connection.time = lambda: int(CRITICAL_TIMEOUT * (1000 + t))
    closed = []
    # Record the (fake) time at which the connection would be closed.
    conn.close = lambda: closed.append(connection.time())
    def answer(packet_id):
        # Inject a Pong answering the given request id.
        p = Packets.Pong()
        p.setId(packet_id)
        conn.connector.receive = [''.join(p.encode())].pop
        conn.readable()
        checkTimeout()
        conn.process()
    def checkTimeout():
        # Trigger onTimeout() iff the deadline has been reached.
        timeout = conn.getTimeout()
        if timeout and timeout <= connection.time():
            conn.onTimeout()
    try:
        for use_case, expected in use_case_list:
            i = iter(use_case)
            conn.cur_id = 0
            set_time(0)
            # No timeout when no pending request
            self.assertEqual(conn._handlers.getNextTimeout(), None)
            conn.ask(Packets.Ping())
            for t in i:
                set_time(t)
                checkTimeout()
                packet_id = i.next()
                if packet_id is None:
                    conn.ask(Packets.Ping())
                else:
                    answer(packet_id)
            i = iter(expected)
            for t in i:
                # just before the deadline, nothing happens...
                set_time(t - .1)
                checkTimeout()
                set_time(t)
                # ...at the deadline, the connection is "closed".
                # this test method relies on the fact that only
                # conn.close is called in case of a timeout
                checkTimeout()
                self.assertEqual(closed.pop(), connection.time())
                answer(i.next())
            self.assertFalse(conn.isPending())
            self.assertFalse(closed)
    finally:
        # Always restore the real clock.
        connection.time = time
class MTConnectionTests(ConnectionTests):
    """Run the connection test suite against MTClientConnection.

    MTClientConnection is the thread-safe client connection; it requires
    a dispatcher and (except for Ping) a queue for every "ask".
    """
    # XXX: here we test non-client-connection-related things too, which
    # duplicates test suite work... Should be fragmented into finer-grained
    # test classes.

    def setUp(self):
        super(MTConnectionTests, self).setUp()
        self.dispatcher = Mock({'__repr__': 'Fake Dispatcher'})

    def _makeClientConnection(self):
        # Same as the base class, but builds an MTClientConnection with
        # the fake dispatcher.
        with dummy_connector:
            conn = MTClientConnection(self.em, self.handler, self.node,
                                      dispatcher=self.dispatcher)
        self.connector = conn.connector
        return conn

    def test_MTClientConnectionQueueParameter(self):
        ask = self._makeClientConnection().ask
        packet = Packets.AskPrimary() # Any non-Ping simple "ask" packet
        # One cannot "ask" anything without a queue
        self.assertRaises(TypeError, ask, packet)
        ask(packet, queue=object())
        # ... except Ping
        ask(Packets.Ping())
class HandlerSwitcherTests(NeoUnitTestBase):
    """Tests for HandlerSwitcher.

    A HandlerSwitcher routes each answer packet to the handler that was
    current when the matching request was emitted.  setHandler() calls
    made while requests are pending are queued, and only become
    effective once every request emitted before them has been answered.
    """

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self._handler = handler = Mock({
            '__repr__': 'initial handler',
        })
        self._connection = Mock({
            '__repr__': 'connection',
            'getAddress': ('127.0.0.1', 10000),
        })
        self._handlers = HandlerSwitcher(handler)

    def _makeNotification(self, msg_id):
        # Notifications are not answers: they always go to the handler
        # that is effective at the time they are handled.
        packet = Packets.StartOperation()
        packet.setId(msg_id)
        return packet

    def _makeRequest(self, msg_id):
        packet = Packets.AskBeginTransaction()
        packet.setId(msg_id)
        return packet

    def _makeAnswer(self, msg_id):
        packet = Packets.AnswerBeginTransaction(self.getNextTID())
        packet.setId(msg_id)
        return packet

    def _makeHandler(self):
        return Mock({'__repr__': 'handler'})

    def _checkPacketReceived(self, handler, packet, index=0):
        # NOTE: only the number of packetReceived calls is asserted; the
        # 'packet' argument documents intent at call sites.
        calls = handler.mockGetNamedCalls('packetReceived')
        self.assertEqual(len(calls), index + 1)

    def _checkCurrentHandler(self, handler):
        self.assertTrue(self._handlers.getHandler() is handler)

    def testInit(self):
        # Initial state: the constructor handler is current, no request
        # is pending.
        self._checkCurrentHandler(self._handler)
        self.assertFalse(self._handlers.isPending())

    def testEmit(self):
        # First case, emit is called outside of a handler
        self.assertFalse(self._handlers.isPending())
        request = self._makeRequest(1)
        self._handlers.emit(request, 0, None)
        self.assertTrue(self._handlers.isPending())
        # Second case, emit is called from inside a handler with a pending
        # handler change.
        new_handler = self._makeHandler()
        applied = self._handlers.setHandler(new_handler)
        self.assertFalse(applied)
        self._checkCurrentHandler(self._handler)
        call_tracker = []
        def packetReceived(conn, packet, kw):
            # Emitting from inside the handler keeps the switch pending.
            self._handlers.emit(self._makeRequest(2), 0, None)
            call_tracker.append(True)
        self._handler.packetReceived = packetReceived
        self._handlers.handle(self._connection, self._makeAnswer(1))
        self.assertEqual(call_tracker, [True])
        # Effective handler must not have changed (new request is blocking
        # it)
        self._checkCurrentHandler(self._handler)
        # Handling the next response will cause the handler to change
        delattr(self._handler, 'packetReceived')
        self._handlers.handle(self._connection, self._makeAnswer(2))
        self._checkCurrentHandler(new_handler)

    def testHandleNotification(self):
        # handle with current handler
        notif1 = self._makeNotification(1)
        self._handlers.handle(self._connection, notif1)
        self._checkPacketReceived(self._handler, notif1)
        # emit a request and delay a handler change
        request = self._makeRequest(2)
        self._handlers.emit(request, 0, None)
        handler = self._makeHandler()
        applied = self._handlers.setHandler(handler)
        self.assertFalse(applied)
        # next notification falls into the current handler
        notif2 = self._makeNotification(3)
        self._handlers.handle(self._connection, notif2)
        self._checkPacketReceived(self._handler, notif2, index=1)
        # answering the pending request applies the new handler, which
        # then receives the next notification
        answer = self._makeAnswer(2)
        self._handlers.handle(self._connection, answer)
        notif3 = self._makeNotification(4)
        self._handlers.handle(self._connection, notif3)
        # Fixed: assert on notif3 (the packet the new handler actually
        # received), not notif2.  Behavior is unchanged because only the
        # call count is checked, but the old argument was misleading.
        self._checkPacketReceived(handler, notif3)

    def testHandleAnswer1(self):
        # handle with current handler
        request = self._makeRequest(1)
        self._handlers.emit(request, 0, None)
        answer = self._makeAnswer(1)
        self._handlers.handle(self._connection, answer)
        self._checkPacketReceived(self._handler, answer)

    def testHandleAnswer2(self):
        # The answer to a request emitted before setHandler() goes to the
        # old handler, then the switch is applied.
        request = self._makeRequest(1)
        self._handlers.emit(request, 0, None)
        handler = self._makeHandler()
        applied = self._handlers.setHandler(handler)
        self.assertFalse(applied)
        answer = self._makeAnswer(1)
        self._handlers.handle(self._connection, answer)
        self._checkPacketReceived(self._handler, answer)
        self._checkCurrentHandler(handler)

    def testHandleAnswer3(self):
        # Multiple queued setHandler calls are applied one by one as the
        # corresponding answers arrive.
        r1 = self._makeRequest(1)
        r2 = self._makeRequest(2)
        r3 = self._makeRequest(3)
        a1 = self._makeAnswer(1)
        a2 = self._makeAnswer(2)
        a3 = self._makeAnswer(3)
        h1 = self._makeHandler()
        h2 = self._makeHandler()
        h3 = self._makeHandler()
        # emit all requests and setHandler calls
        self._handlers.emit(r1, 0, None)
        applied = self._handlers.setHandler(h1)
        self.assertFalse(applied)
        self._handlers.emit(r2, 0, None)
        applied = self._handlers.setHandler(h2)
        self.assertFalse(applied)
        self._handlers.emit(r3, 0, None)
        applied = self._handlers.setHandler(h3)
        self.assertFalse(applied)
        self._checkCurrentHandler(self._handler)
        self.assertTrue(self._handlers.isPending())
        # process answers
        self._handlers.handle(self._connection, a1)
        self._checkCurrentHandler(h1)
        self._handlers.handle(self._connection, a2)
        self._checkCurrentHandler(h2)
        self._handlers.handle(self._connection, a3)
        self._checkCurrentHandler(h3)

    def testHandleAnswer4(self):
        # A single setHandler after several requests only applies once
        # ALL of those requests have been answered.
        r1 = self._makeRequest(1)
        r2 = self._makeRequest(2)
        r3 = self._makeRequest(3)
        a1 = self._makeAnswer(1)
        a2 = self._makeAnswer(2)
        a3 = self._makeAnswer(3)
        h = self._makeHandler()
        # emit all requests
        self._handlers.emit(r1, 0, None)
        self._handlers.emit(r2, 0, None)
        self._handlers.emit(r3, 0, None)
        applied = self._handlers.setHandler(h)
        self.assertFalse(applied)
        # process answers
        self._handlers.handle(self._connection, a1)
        self._checkCurrentHandler(self._handler)
        self._handlers.handle(self._connection, a2)
        self._checkCurrentHandler(self._handler)
        self._handlers.handle(self._connection, a3)
        self._checkCurrentHandler(h)

    def testHandleUnexpected(self):
        # An answer belonging to a later handler epoch, received while an
        # earlier request is still pending, aborts the connection.
        r1 = self._makeRequest(1)
        r2 = self._makeRequest(2)
        a2 = self._makeAnswer(2)
        h = self._makeHandler()
        # emit requests around the setHandler call
        self._handlers.emit(r1, 0, None)
        applied = self._handlers.setHandler(h)
        self.assertFalse(applied)
        self._handlers.emit(r2, 0, None)
        # process answer for next state
        self._handlers.handle(self._connection, a2)
        self.checkAborted(self._connection)
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testDispatcher.py 0000664 0000000 0000000 00000013271 12601037530 0026363 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from mock import Mock
from . import NeoTestBase
from neo.lib.dispatcher import Dispatcher, ForgottenPacket
from Queue import Queue
import unittest
class DispatcherTests(NeoTestBase):
    """Tests for Dispatcher: routing of answer packets (identified by
    connection + message id) to per-caller queues."""

    def setUp(self):
        NeoTestBase.setUp(self)
        self.dispatcher = Dispatcher()

    def testRegister(self):
        # A registered (conn, msg_id) pair routes the dispatched packet
        # to the queue; unknown ids are reported as not dispatched.
        conn = object()
        queue = Queue()
        MARKER = object()
        self.dispatcher.register(conn, 1, queue)
        self.assertTrue(queue.empty())
        self.assertTrue(self.dispatcher.dispatch(conn, 1, MARKER, {}))
        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(block=False), (conn, MARKER, {}))
        self.assertTrue(queue.empty())
        self.assertFalse(self.dispatcher.dispatch(conn, 2, None, {}))

    def testUnregister(self):
        # Unregistering a connection notifies its queues (one put) and
        # drops all its expectations.
        conn = object()
        queue = Mock()
        self.dispatcher.register(conn, 2, queue)
        self.dispatcher.unregister(conn)
        self.assertEqual(len(queue.mockGetNamedCalls('put')), 1)
        self.assertFalse(self.dispatcher.dispatch(conn, 2, None, {}))

    def testRegistered(self):
        # registered(conn) reflects whether the connection currently has
        # at least one pending expectation.
        conn1 = object()
        conn2 = object()
        self.assertFalse(self.dispatcher.registered(conn1))
        self.assertFalse(self.dispatcher.registered(conn2))
        self.dispatcher.register(conn1, 1, Mock())
        self.assertTrue(self.dispatcher.registered(conn1))
        self.assertFalse(self.dispatcher.registered(conn2))
        self.dispatcher.register(conn2, 2, Mock())
        self.assertTrue(self.dispatcher.registered(conn1))
        self.assertTrue(self.dispatcher.registered(conn2))
        self.dispatcher.unregister(conn1)
        self.assertFalse(self.dispatcher.registered(conn1))
        self.assertTrue(self.dispatcher.registered(conn2))
        self.dispatcher.unregister(conn2)
        self.assertFalse(self.dispatcher.registered(conn1))
        self.assertFalse(self.dispatcher.registered(conn2))

    def testPending(self):
        # pending(queue) is true while the queue has unanswered
        # expectations or undelivered content.
        conn1 = object()
        conn2 = object()
        class Queue(object):
            # Minimal queue stub with a controllable empty() result.
            _empty = True
            def empty(self):
                return self._empty
            def put(self, value):
                pass
        queue1 = Queue()
        queue2 = Queue()
        self.dispatcher.register(conn1, 1, queue1)
        self.assertTrue(self.dispatcher.pending(queue1))
        self.dispatcher.register(conn2, 2, queue1)
        self.assertTrue(self.dispatcher.pending(queue1))
        self.dispatcher.register(conn2, 3, queue2)
        self.assertTrue(self.dispatcher.pending(queue1))
        self.assertTrue(self.dispatcher.pending(queue2))
        self.dispatcher.dispatch(conn1, 1, None, {})
        self.assertTrue(self.dispatcher.pending(queue1))
        self.assertTrue(self.dispatcher.pending(queue2))
        self.dispatcher.dispatch(conn2, 2, None, {})
        self.assertFalse(self.dispatcher.pending(queue1))
        self.assertTrue(self.dispatcher.pending(queue2))
        # a non-empty queue is pending even without expectations
        queue1._empty = False
        self.assertTrue(self.dispatcher.pending(queue1))
        queue1._empty = True
        self.dispatcher.register(conn1, 4, queue1)
        self.dispatcher.register(conn2, 5, queue1)
        self.assertTrue(self.dispatcher.pending(queue1))
        self.assertTrue(self.dispatcher.pending(queue2))
        self.dispatcher.unregister(conn2)
        self.assertTrue(self.dispatcher.pending(queue1))
        self.assertFalse(self.dispatcher.pending(queue2))
        self.dispatcher.unregister(conn1)
        self.assertFalse(self.dispatcher.pending(queue1))
        self.assertFalse(self.dispatcher.pending(queue2))

    def testForget(self):
        # Forgetting an expectation delivers a ForgottenPacket marker and
        # makes the eventual real answer be silently discarded.
        conn = object()
        queue = Queue()
        MARKER = object()
        # Register an expectation
        self.dispatcher.register(conn, 1, queue)
        # ...and forget about it, returning registered queue
        forgotten_queue = self.dispatcher.forget(conn, 1)
        self.assertTrue(queue is forgotten_queue, (queue, forgotten_queue))
        # A ForgottenPacket must have been put in the queue
        queue_conn, packet, kw = queue.get(block=False)
        self.assertTrue(isinstance(packet, ForgottenPacket), packet)
        # ...with appropriate packet id
        self.assertEqual(packet.getId(), 1)
        # ...and appropriate connection
        self.assertTrue(conn is queue_conn, (conn, queue_conn))
        # If forgotten twice, it must raise a KeyError
        self.assertRaises(KeyError, self.dispatcher.forget, conn, 1)
        # Event arrives, return value must be True (it was expected)
        self.assertTrue(self.dispatcher.dispatch(conn, 1, MARKER, {}))
        # ...but must not have reached the queue
        self.assertTrue(queue.empty())
        # Register an expectation
        self.dispatcher.register(conn, 1, queue)
        # ...and forget about it
        self.dispatcher.forget(conn, 1)
        queue.get(block=False)
        # No exception must happen if connection is lost.
        self.dispatcher.unregister(conn)
        # Forgotten message's queue must not have received a "None"
        self.assertTrue(queue.empty())
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testHandler.py 0000664 0000000 0000000 00000005643 12601037530 0025656 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from . import NeoUnitTestBase
from neo.lib.handler import EventHandler
from neo.lib.protocol import PacketMalformedError, UnexpectedPacketError, \
BrokenNodeDisallowedError, NotReadyError, ProtocolError
class HandlerTests(NeoUnitTestBase):
    """Tests for EventHandler.dispatch() error handling."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        app = Mock()
        self.handler = EventHandler(app)

    def setFakeMethod(self, method):
        # Install *method* as the handler method the fake packet maps to.
        self.handler.fake_method = method

    def getFakePacket(self):
        # A packet whose handler_method_name points to our fake method.
        p = Mock({
            'decode': (),
            '__repr__': 'Fake Packet',
        })
        p.handler_method_name = 'fake_method'
        return p

    def _dispatchRaising(self, conn, packet, exc, *exc_args):
        # Reset the connection mock, make the handler method raise
        # exc(*exc_args), and dispatch the packet once.
        conn.mockCalledMethods = {}
        def fake(c):
            raise exc(*exc_args)
        self.setFakeMethod(fake)
        self.handler.dispatch(conn, packet)

    def test_dispatch(self):
        conn = self.getFakeConnection()
        packet = self.getFakePacket()
        # all is ok: dispatch succeeds silently
        self.setFakeMethod(lambda c: None)
        self.handler.dispatch(conn, packet)
        # UnexpectedPacketError -> an error packet is sent back and the
        # connection is aborted
        self._dispatchRaising(conn, packet, UnexpectedPacketError,
                              'fake packet')
        self.checkErrorPacket(conn)
        self.checkAborted(conn)
        # PacketMalformedError -> the connection is closed
        self._dispatchRaising(conn, packet, PacketMalformedError, 'message')
        self.checkClosed(conn)
        # BrokenNodeDisallowedError, NotReadyError and ProtocolError all
        # answer with an error packet and abort the connection
        for exc in (BrokenNodeDisallowedError, NotReadyError, ProtocolError):
            self._dispatchRaising(conn, packet, exc)
            self.checkErrorPacket(conn)
            self.checkAborted(conn)
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testNodes.py 0000664 0000000 0000000 00000034053 12601037530 0025346 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from neo.lib import protocol
from neo.lib.protocol import NodeTypes, NodeStates
from neo.lib.node import Node, MasterNode, StorageNode, \
ClientNode, AdminNode, NodeManager, MasterDB
from . import NeoUnitTestBase, getTempDirectory
from time import time
from os import chmod, mkdir, rmdir, unlink
from os.path import join, exists
class NodesTests(NeoUnitTestBase):
def setUp(self):
    NeoUnitTestBase.setUp(self)
    # All nodes below share this mocked NodeManager to observe the
    # _updateAddress/_updateUUID index notifications.
    self.manager = Mock()
def _updatedByAddress(self, node, index=0):
    # Assert the manager's _updateAddress hook fired index+1 times,
    # most recently with *node*.
    calls = self.manager.mockGetNamedCalls('_updateAddress')
    self.assertEqual(len(calls), index + 1)
    self.assertEqual(calls[index].getParam(0), node)
def _updatedByUUID(self, node, index=0):
calls = self.manager.mockGetNamedCalls('_updateUUID')
self.assertEqual(len(calls), index + 1)
self.assertEqual(calls[index].getParam(0), node)
def testInit(self):
""" Check the node initialization """
address = ('127.0.0.1', 10000)
uuid = self.getNewUUID(None)
node = Node(self.manager, address=address, uuid=uuid)
self.assertEqual(node.getState(), NodeStates.UNKNOWN)
self.assertEqual(node.getAddress(), address)
self.assertEqual(node.getUUID(), uuid)
self.assertTrue(time() - 1 < node.getLastStateChange() < time())
def testState(self):
""" Check if the last changed time is updated when state is changed """
node = Node(self.manager)
self.assertEqual(node.getState(), NodeStates.UNKNOWN)
self.assertTrue(time() - 1 < node.getLastStateChange() < time())
previous_time = node.getLastStateChange()
node.setState(NodeStates.RUNNING)
self.assertEqual(node.getState(), NodeStates.RUNNING)
self.assertTrue(previous_time < node.getLastStateChange())
self.assertTrue(time() - 1 < node.getLastStateChange() < time())
def testAddress(self):
""" Check if the node is indexed by address """
node = Node(self.manager)
self.assertEqual(node.getAddress(), None)
address = ('127.0.0.1', 10000)
node.setAddress(address)
self._updatedByAddress(node)
def testUUID(self):
""" As for Address but UUID """
node = Node(self.manager)
self.assertEqual(node.getAddress(), None)
uuid = self.getNewUUID(None)
node.setUUID(uuid)
self._updatedByUUID(node)
def testTypes(self):
""" Check that the abstract node has no type """
node = Node(self.manager)
self.assertRaises(NotImplementedError, node.getType)
self.assertFalse(node.isStorage())
self.assertFalse(node.isMaster())
self.assertFalse(node.isClient())
self.assertFalse(node.isAdmin())
def testMaster(self):
""" Check Master sub class """
node = MasterNode(self.manager)
self.assertEqual(node.getType(), protocol.NodeTypes.MASTER)
self.assertTrue(node.isMaster())
self.assertFalse(node.isStorage())
self.assertFalse(node.isClient())
self.assertFalse(node.isAdmin())
def testStorage(self):
""" Check Storage sub class """
node = StorageNode(self.manager)
self.assertEqual(node.getType(), protocol.NodeTypes.STORAGE)
self.assertTrue(node.isStorage())
self.assertFalse(node.isMaster())
self.assertFalse(node.isClient())
self.assertFalse(node.isAdmin())
def testClient(self):
""" Check Client sub class """
node = ClientNode(self.manager)
self.assertEqual(node.getType(), protocol.NodeTypes.CLIENT)
self.assertTrue(node.isClient())
self.assertFalse(node.isMaster())
self.assertFalse(node.isStorage())
self.assertFalse(node.isAdmin())
def testAdmin(self):
""" Check Admin sub class """
node = AdminNode(self.manager)
self.assertEqual(node.getType(), protocol.NodeTypes.ADMIN)
self.assertTrue(node.isAdmin())
self.assertFalse(node.isMaster())
self.assertFalse(node.isStorage())
self.assertFalse(node.isClient())
class NodeManagerTests(NeoUnitTestBase):
    """Unit tests for NodeManager: registration, lookup and bulk update."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.manager = NodeManager()

    # Helpers creating one node of each type, registered in the manager.
    def _addStorage(self):
        self.storage = StorageNode(self.manager, ('127.0.0.1', 1000),
            self.getStorageUUID())

    def _addMaster(self):
        self.master = MasterNode(self.manager, ('127.0.0.1', 2000),
            self.getMasterUUID())

    def _addClient(self):
        # client nodes have no listening address
        self.client = ClientNode(self.manager, None, self.getClientUUID())

    def _addAdmin(self):
        self.admin = AdminNode(self.manager, ('127.0.0.1', 4000),
            self.getAdminUUID())

    # Assertion helpers comparing the manager content with expectations.
    def checkNodes(self, node_list):
        manager = self.manager
        self.assertEqual(sorted(manager.getList()), sorted(node_list))

    def checkMasters(self, master_list):
        manager = self.manager
        self.assertEqual(manager.getMasterList(), master_list)

    def checkStorages(self, storage_list):
        manager = self.manager
        self.assertEqual(manager.getStorageList(), storage_list)

    def checkClients(self, client_list):
        manager = self.manager
        self.assertEqual(manager.getClientList(), client_list)

    def checkByServer(self, node):
        node_found = self.manager.getByAddress(node.getAddress())
        self.assertEqual(node_found, node)

    def checkByUUID(self, node):
        node_found = self.manager.getByUUID(node.getUUID())
        self.assertEqual(node_found, node)

    def checkIdentified(self, node_list, pool_set=None):
        identified_node_list = self.manager.getIdentifiedList(pool_set)
        self.assertEqual(set(identified_node_list), set(node_list))

    def testInit(self):
        """ Check the manager is empty when started """
        manager = self.manager
        self.checkNodes([])
        self.checkMasters([])
        self.checkStorages([])
        self.checkClients([])
        address = ('127.0.0.1', 10000)
        self.assertEqual(manager.getByAddress(address), None)
        self.assertEqual(manager.getByAddress(None), None)
        uuid = self.getNewUUID(None)
        self.assertEqual(manager.getByUUID(uuid), None)
        self.assertEqual(manager.getByUUID(None), None)

    def testAdd(self):
        """ Check if new nodes are registered in the manager """
        manager = self.manager
        self.checkNodes([])
        # storage
        self._addStorage()
        self.checkNodes([self.storage])
        self.checkStorages([self.storage])
        self.checkMasters([])
        self.checkClients([])
        self.checkByServer(self.storage)
        self.checkByUUID(self.storage)
        # master
        self._addMaster()
        self.checkNodes([self.storage, self.master])
        self.checkStorages([self.storage])
        self.checkMasters([self.master])
        self.checkClients([])
        self.checkByServer(self.master)
        self.checkByUUID(self.master)
        # client
        self._addClient()
        self.checkNodes([self.storage, self.master, self.client])
        self.checkStorages([self.storage])
        self.checkMasters([self.master])
        self.checkClients([self.client])
        # client node has no address
        self.assertEqual(manager.getByAddress(self.client.getAddress()), None)
        self.checkByUUID(self.client)
        # admin
        self._addAdmin()
        self.checkNodes([self.storage, self.master, self.client, self.admin])
        self.checkStorages([self.storage])
        self.checkMasters([self.master])
        self.checkClients([self.client])
        self.checkByServer(self.admin)
        self.checkByUUID(self.admin)

    def testUpdate(self):
        """ Check manager content update """
        # set up four nodes
        manager = self.manager
        self._addMaster()
        self._addStorage()
        self._addClient()
        self._addAdmin()
        self.checkNodes([self.master, self.storage, self.client, self.admin])
        self.checkMasters([self.master])
        self.checkStorages([self.storage])
        self.checkClients([self.client])
        # build changes
        old_address = self.master.getAddress()
        new_address = ('127.0.0.1', 2001)
        old_uuid = self.storage.getUUID()
        new_uuid = self.getStorageUUID()
        node_list = (
            (NodeTypes.CLIENT, None, self.client.getUUID(), NodeStates.DOWN),
            (NodeTypes.MASTER, new_address, self.master.getUUID(),
                NodeStates.RUNNING),
            (NodeTypes.STORAGE, self.storage.getAddress(), new_uuid,
                NodeStates.RUNNING),
            (NodeTypes.ADMIN, self.admin.getAddress(), self.admin.getUUID(),
                NodeStates.UNKNOWN),
        )
        # update manager content
        manager.update(node_list)
        # - the client is down, so it was removed
        self.checkClients([])
        # - the master changed its address
        self.checkMasters([self.master])
        self.assertEqual(manager.getByAddress(old_address), None)
        self.master.setAddress(new_address)
        self.checkByServer(self.master)
        # - the storage changed its UUID
        storage_list = manager.getStorageList()
        # BUG FIX: `assertTrue(len(storage_list), 1)` always passed, since
        # the second argument of assertTrue is only the failure message.
        self.assertEqual(len(storage_list), 1)
        new_storage = storage_list[0]
        self.assertNotEqual(new_storage.getUUID(), old_uuid)
        self.assertEqual(new_storage.getState(), NodeStates.RUNNING)
        # - admin is still here but in UNKNOWN state
        self.checkNodes([self.master, self.admin, new_storage])
        self.assertEqual(self.admin.getState(), NodeStates.UNKNOWN)

    def testIdentified(self):
        """ Check that only nodes with a connection are reported identified """
        # set up four nodes
        manager = self.manager
        self._addMaster()
        self._addStorage()
        self._addClient()
        self._addAdmin()
        # switch nodes to connected, one by one
        self.checkIdentified([])
        self.master.setConnection(Mock())
        self.checkIdentified([self.master])
        self.storage.setConnection(Mock())
        self.checkIdentified([self.master, self.storage])
        self.client.setConnection(Mock())
        self.checkIdentified([self.master, self.storage, self.client])
        self.admin.setConnection(Mock())
        self.checkIdentified([self.master, self.storage, self.client,
            self.admin])
        # check the pool_set attribute filters by UUID
        self.checkIdentified([self.master], pool_set=[self.master.getUUID()])
        self.checkIdentified([self.storage], pool_set=[self.storage.getUUID()])
        self.checkIdentified([self.client], pool_set=[self.client.getUUID()])
        self.checkIdentified([self.admin], pool_set=[self.admin.getUUID()])
        self.checkIdentified([self.master, self.storage], pool_set=[
            self.master.getUUID(), self.storage.getUUID()])
class MasterDBTests(NeoUnitTestBase):
    """Tests for MasterDB, the file-backed set of known master addresses."""

    def _checkMasterDB(self, path, expected_master_list):
        # Re-read the database from disk and compare its content with the
        # expected list, order-insensitively.
        db = list(MasterDB(path))
        db_set = set(db)
        # Generic sanity check: no duplicate entries
        self.assertEqual(len(db), len(db_set))
        self.assertEqual(db_set, set(expected_master_list))

    def testInitialAccessRights(self):
        """
        Verify MasterDB raises immediately on instanciation if it cannot
        create a non-existing database. This does not guarantee any later
        open will succeed, but makes the simple error case obvious.
        """
        temp_dir = getTempDirectory()
        directory = join(temp_dir, 'read_only')
        db_file = join(directory, 'not_created')
        # Read-only directory: creating the db file inside it must fail.
        # NOTE(review): assumes the test does not run as root, since root
        # bypasses permission checks — confirm on the CI environment.
        mkdir(directory, 0400)
        try:
            self.assertRaises(IOError, MasterDB, db_file)
        finally:
            rmdir(directory)

    def testLaterAccessRights(self):
        """
        Verify MasterDB does not raise when modifying database.
        """
        temp_dir = getTempDirectory()
        directory = join(temp_dir, 'read_write')
        db_file = join(directory, 'db')
        mkdir(directory)
        try:
            db = MasterDB(db_file)
            self.assertTrue(exists(db_file), db_file)
            # Make the file read-only before modifying the in-memory db.
            chmod(db_file, 0400)
            address = ('example.com', 1024)
            # Must not raise
            db.add(address)
            # Value is stored
            self.assertTrue(address in db, [x for x in db])
            # But not visible to a new db instance (write access restored so
            # it can be created)
            chmod(db_file, 0600)
            db2 = MasterDB(db_file)
            self.assertFalse(address in db2, [x for x in db2])
        finally:
            if exists(db_file):
                unlink(db_file)
            rmdir(directory)

    def testPersistence(self):
        # add() and discard() must be durable across MasterDB instances.
        temp_dir = getTempDirectory()
        directory = join(temp_dir, 'read_write')
        db_file = join(directory, 'db')
        mkdir(directory)
        try:
            db = MasterDB(db_file)
            self.assertTrue(exists(db_file), db_file)
            address = ('example.com', 1024)
            db.add(address)
            address2 = ('example.org', 1024)
            db.add(address2)
            # Values are visible to a new db instance
            db2 = MasterDB(db_file)
            self.assertTrue(address in db2, [x for x in db2])
            self.assertTrue(address2 in db2, [x for x in db2])
            db.discard(address)
            # Create yet another instance (file is not supposed to be shared)
            db3 = MasterDB(db_file)
            self.assertFalse(address in db3, [x for x in db3])
            self.assertTrue(address2 in db3, [x for x in db3])
        finally:
            if exists(db_file):
                unlink(db_file)
            rmdir(directory)
# Allow running this test module standalone from the command line.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testPT.py 0000664 0000000 0000000 00000041037 12601037530 0024621 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from mock import Mock
from neo.lib.protocol import NodeStates, CellStates
from neo.lib.pt import Cell, PartitionTable, PartitionTableException
from neo.lib.node import StorageNode
from . import NeoUnitTestBase
class PartitionTableTests(NeoUnitTestBase):
    """Unit tests for neo.lib.pt.Cell and neo.lib.pt.PartitionTable."""

    def test_01_Cell(self):
        """Check Cell construction, getters and the state setter."""
        uuid = self.getStorageUUID()
        server = ("127.0.0.1", 19001)
        sn = StorageNode(Mock(), server, uuid)
        # default cell state is UP_TO_DATE
        cell = Cell(sn)
        self.assertEqual(cell.node, sn)
        self.assertEqual(cell.state, CellStates.UP_TO_DATE)
        cell = Cell(sn, CellStates.OUT_OF_DATE)
        self.assertEqual(cell.node, sn)
        self.assertEqual(cell.state, CellStates.OUT_OF_DATE)
        # check getters
        self.assertEqual(cell.getNode(), sn)
        self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
        self.assertEqual(cell.getNodeState(), NodeStates.UNKNOWN)
        self.assertEqual(cell.getUUID(), uuid)
        self.assertEqual(cell.getAddress(), server)
        # check state setter
        cell.setState(CellStates.FEEDING)
        self.assertEqual(cell.getState(), CellStates.FEEDING)

    def test_03_setCell(self):
        """Check setCell for each cell state, and that BROKEN/DOWN nodes
        are refused."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        # add a cell to an empty row
        self.assertNotIn(sn1, pt.count_dict)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        self.assertIn(sn1, pt.count_dict)
        self.assertEqual(pt.count_dict[sn1], 1)
        for x in xrange(num_partitions):
            if x == 0:
                self.assertEqual(len(pt.partition_list[x]), 1)
                cell = pt.partition_list[x][0]
                self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)
            else:
                self.assertEqual(len(pt.partition_list[x]), 0)
        # try to add to a nonexistent partition
        self.assertRaises(IndexError, pt.setCell, 10, sn1,
            CellStates.UP_TO_DATE)
        # adding in DISCARDED state removes the cell
        pt.setCell(0, sn1, CellStates.DISCARDED)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        self.assertEqual(pt.count_dict[sn1], 0)
        # add a feeding node into an empty row: present, but not counted
        pt.setCell(0, sn1, CellStates.FEEDING)
        self.assertIn(sn1, pt.count_dict)
        self.assertEqual(pt.count_dict[sn1], 0)
        for x in xrange(num_partitions):
            if x == 0:
                self.assertEqual(len(pt.partition_list[x]), 1)
                cell = pt.partition_list[x][0]
                self.assertEqual(cell.getState(), CellStates.FEEDING)
            else:
                self.assertEqual(len(pt.partition_list[x]), 0)
        # re-add it as feeding, nothing changes
        pt.setCell(0, sn1, CellStates.FEEDING)
        self.assertIn(sn1, pt.count_dict)
        self.assertEqual(pt.count_dict[sn1], 0)
        for x in xrange(num_partitions):
            if x == 0:
                self.assertEqual(len(pt.partition_list[x]), 1)
                cell = pt.partition_list[x][0]
                self.assertEqual(cell.getState(), CellStates.FEEDING)
            else:
                self.assertEqual(len(pt.partition_list[x]), 0)
        # now add it as up to date: counted again
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        self.assertIn(sn1, pt.count_dict)
        self.assertEqual(pt.count_dict[sn1], 1)
        for x in xrange(num_partitions):
            if x == 0:
                self.assertEqual(len(pt.partition_list[x]), 1)
                cell = pt.partition_list[x][0]
                self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)
            else:
                self.assertEqual(len(pt.partition_list[x]), 0)
        # nodes in BROKEN or DOWN state must not be taken into account
        pt.setCell(0, sn1, CellStates.DISCARDED)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        self.assertEqual(pt.count_dict[sn1], 0)
        sn1.setState(NodeStates.BROKEN)
        self.assertRaises(PartitionTableException, pt.setCell,
                0, sn1, CellStates.UP_TO_DATE)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        self.assertEqual(pt.count_dict[sn1], 0)
        sn1.setState(NodeStates.DOWN)
        self.assertRaises(PartitionTableException, pt.setCell,
                0, sn1, CellStates.UP_TO_DATE)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        self.assertEqual(pt.count_dict[sn1], 0)

    def test_04_removeCell(self):
        """Check removeCell for counted (up-to-date) and uncounted
        (feeding) cells."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        # add a cell to an empty row
        self.assertNotIn(sn1, pt.count_dict)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        self.assertIn(sn1, pt.count_dict)
        self.assertEqual(pt.count_dict[sn1], 1)
        for x in xrange(num_partitions):
            if x == 0:
                self.assertEqual(len(pt.partition_list[x]), 1)
            else:
                self.assertEqual(len(pt.partition_list[x]), 0)
        # remove it
        pt.removeCell(0, sn1)
        self.assertEqual(pt.count_dict[sn1], 0)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)
        # add a feeding cell
        pt.setCell(0, sn1, CellStates.FEEDING)
        self.assertIn(sn1, pt.count_dict)
        self.assertEqual(pt.count_dict[sn1], 0)
        for x in xrange(num_partitions):
            if x == 0:
                self.assertEqual(len(pt.partition_list[x]), 1)
            else:
                self.assertEqual(len(pt.partition_list[x]), 0)
        # remove it
        pt.removeCell(0, sn1)
        self.assertEqual(pt.count_dict[sn1], 0)
        for x in xrange(num_partitions):
            self.assertEqual(len(pt.partition_list[x]), 0)

    def test_05_getCellList(self):
        """Check getCellList with and without the readable filter."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        # add cells in all states; the discarded one is never stored
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        uuid2 = self.getStorageUUID()
        server2 = ("127.0.0.2", 19001)
        sn2 = StorageNode(Mock(), server2, uuid2)
        pt.setCell(0, sn2, CellStates.OUT_OF_DATE)
        uuid3 = self.getStorageUUID()
        server3 = ("127.0.0.3", 19001)
        sn3 = StorageNode(Mock(), server3, uuid3)
        pt.setCell(0, sn3, CellStates.FEEDING)
        uuid4 = self.getStorageUUID()
        server4 = ("127.0.0.4", 19001)
        sn4 = StorageNode(Mock(), server4, uuid4)
        pt.setCell(0, sn4, CellStates.DISCARDED) # won't be added
        # now check the result
        self.assertEqual(len(pt.partition_list[0]), 3)
        for x in xrange(num_partitions):
            if x == 0:
                # all nodes
                all_cell = pt.getCellList(0)
                all_nodes = [c.getNode() for c in all_cell]
                self.assertEqual(len(all_cell), 3)
                self.assertIn(sn1, all_nodes)
                self.assertIn(sn2, all_nodes)
                self.assertIn(sn3, all_nodes)
                self.assertNotIn(sn4, all_nodes)
                # readable nodes: out-of-date cells are excluded
                all_cell = pt.getCellList(0, readable=True)
                all_nodes = [c.getNode() for c in all_cell]
                self.assertEqual(len(all_cell), 2)
                self.assertIn(sn1, all_nodes)
                self.assertNotIn(sn2, all_nodes)
                self.assertIn(sn3, all_nodes)
                self.assertNotIn(sn4, all_nodes)
            else:
                # BUG FIX: the loop variable was ignored here and the literal
                # partition 1 was checked on every iteration.
                self.assertEqual(len(pt.getCellList(x, False)), 0)
                self.assertEqual(len(pt.getCellList(x, True)), 0)

    def test_06_clear(self):
        """Check that clear() empties every row and resets the counts."""
        # add some nodes
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        uuid2 = self.getStorageUUID()
        server2 = ("127.0.0.2", 19001)
        sn2 = StorageNode(Mock(), server2, uuid2)
        pt.setCell(1, sn2, CellStates.OUT_OF_DATE)
        uuid3 = self.getStorageUUID()
        server3 = ("127.0.0.3", 19001)
        sn3 = StorageNode(Mock(), server3, uuid3)
        pt.setCell(2, sn3, CellStates.FEEDING)
        # now check the result
        self.assertEqual(len(pt.partition_list[0]), 1)
        self.assertEqual(len(pt.partition_list[1]), 1)
        self.assertEqual(len(pt.partition_list[2]), 1)
        pt.clear()
        partition_list = pt.partition_list
        self.assertEqual(len(partition_list), num_partitions)
        for x in xrange(num_partitions):
            part = partition_list[x]
            self.assertTrue(isinstance(part, list))
            self.assertEqual(len(part), 0)
        self.assertEqual(len(pt.count_dict), 0)

    def test_07_getNodeSet(self):
        """Check getNodeSet, with and without the readable filter."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        uuid2 = self.getStorageUUID()
        server2 = ("127.0.0.2", 19001)
        sn2 = StorageNode(Mock(), server2, uuid2)
        pt.setCell(0, sn2, CellStates.OUT_OF_DATE)
        uuid3 = self.getStorageUUID()
        server3 = ("127.0.0.3", 19001)
        sn3 = StorageNode(Mock(), server3, uuid3)
        pt.setCell(0, sn3, CellStates.FEEDING)
        uuid4 = self.getStorageUUID()
        server4 = ("127.0.0.4", 19001)
        sn4 = StorageNode(Mock(), server4, uuid4)
        pt.setCell(0, sn4, CellStates.DISCARDED) # won't be added
        # the readable set only contains up-to-date and feeding nodes;
        # discarded cells are never stored at all
        self.assertEqual(pt.getNodeSet(True), {sn1, sn3})
        self.assertEqual(len(pt.getNodeSet()), 3)

    def test_08_filled(self):
        """Check filled() once every partition has at least one cell."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        self.assertEqual(pt.np, num_partitions)
        self.assertEqual(pt.num_filled_rows, 0)
        self.assertFalse(pt.filled())
        # adding a node in all partitions
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        for x in xrange(num_partitions):
            pt.setCell(x, sn1, CellStates.UP_TO_DATE)
        self.assertEqual(pt.num_filled_rows, num_partitions)
        self.assertTrue(pt.filled())

    def test_09_hasOffset(self):
        """Check hasOffset for filled, empty and out-of-range partitions."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        # now test
        self.assertTrue(pt.hasOffset(0))
        self.assertFalse(pt.hasOffset(1))
        # unknown partition
        self.assertFalse(pt.hasOffset(50))

    def test_10_operational(self):
        """Check operational(): the table must be filled and every partition
        must have a readable cell on a running node."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        self.assertFalse(pt.filled())
        self.assertFalse(pt.operational())
        # adding a node in all partitions
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        for x in xrange(num_partitions):
            pt.setCell(x, sn1, CellStates.UP_TO_DATE)
        self.assertTrue(pt.filled())
        # it's up to date and running, so operational
        sn1.setState(NodeStates.RUNNING)
        self.assertTrue(pt.operational())
        # same with feeding state
        pt.clear()
        self.assertFalse(pt.filled())
        self.assertFalse(pt.operational())
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        for x in xrange(num_partitions):
            pt.setCell(x, sn1, CellStates.FEEDING)
        self.assertTrue(pt.filled())
        # it's feeding and running, so operational
        sn1.setState(NodeStates.RUNNING)
        self.assertTrue(pt.operational())
        # same with feeding state but a non-running node
        pt.clear()
        self.assertFalse(pt.filled())
        self.assertFalse(pt.operational())
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        sn1.setState(NodeStates.TEMPORARILY_DOWN)
        for x in xrange(num_partitions):
            pt.setCell(x, sn1, CellStates.FEEDING)
        self.assertTrue(pt.filled())
        # it's feeding but not running, so not operational
        self.assertFalse(pt.operational())
        # same with out-of-date state and a running node
        pt.clear()
        self.assertFalse(pt.filled())
        self.assertFalse(pt.operational())
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        for x in xrange(num_partitions):
            pt.setCell(x, sn1, CellStates.OUT_OF_DATE)
        self.assertTrue(pt.filled())
        # it's not up to date although running, so not operational
        self.assertFalse(pt.operational())

    def test_12_getRow(self):
        """Check getRow content for filled, empty and out-of-range rows."""
        num_partitions = 5
        num_replicas = 2
        pt = PartitionTable(num_partitions, num_replicas)
        # add nodes
        uuid1 = self.getStorageUUID()
        server1 = ("127.0.0.1", 19001)
        sn1 = StorageNode(Mock(), server1, uuid1)
        pt.setCell(0, sn1, CellStates.UP_TO_DATE)
        pt.setCell(1, sn1, CellStates.UP_TO_DATE)
        pt.setCell(2, sn1, CellStates.UP_TO_DATE)
        uuid2 = self.getStorageUUID()
        server2 = ("127.0.0.2", 19001)
        sn2 = StorageNode(Mock(), server2, uuid2)
        pt.setCell(0, sn2, CellStates.UP_TO_DATE)
        pt.setCell(1, sn2, CellStates.UP_TO_DATE)
        uuid3 = self.getStorageUUID()
        server3 = ("127.0.0.3", 19001)
        sn3 = StorageNode(Mock(), server3, uuid3)
        pt.setCell(0, sn3, CellStates.UP_TO_DATE)
        # test
        row_0 = pt.getRow(0)
        self.assertEqual(len(row_0), 3)
        for uuid, state in row_0:
            self.assertIn(uuid, (sn1.getUUID(), sn2.getUUID(), sn3.getUUID()))
            self.assertEqual(state, CellStates.UP_TO_DATE)
        row_1 = pt.getRow(1)
        self.assertEqual(len(row_1), 2)
        for uuid, state in row_1:
            self.assertIn(uuid, (sn1.getUUID(), sn2.getUUID()))
            self.assertEqual(state, CellStates.UP_TO_DATE)
        row_2 = pt.getRow(2)
        self.assertEqual(len(row_2), 1)
        for uuid, state in row_2:
            self.assertEqual(uuid, sn1.getUUID())
            self.assertEqual(state, CellStates.UP_TO_DATE)
        row_3 = pt.getRow(3)
        self.assertEqual(len(row_3), 0)
        row_4 = pt.getRow(4)
        self.assertEqual(len(row_4), 0)
        # unknown row
        self.assertRaises(IndexError, pt.getRow, 5)
# Allow running this test module standalone from the command line.
if __name__ == '__main__':
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/testUtil.py 0000664 0000000 0000000 00000004343 12601037530 0025212 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2006-2015 Nexedi SA
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
import socket
from . import NeoUnitTestBase
from neo.lib.util import ReadBuffer, parseNodeAddress
class UtilTests(NeoUnitTestBase):
    """Tests for helpers in neo.lib.util."""

    def test_parseNodeAddress(self):
        """Parsing of addresses: IPv4, IPv6, service names and defaults."""
        def test(parsed, *args):
            # `parsed` is the expected result of parseNodeAddress(*args)
            self.assertEqual(parsed, parseNodeAddress(*args))
        http_port = socket.getservbyname('http')
        # port defaults to 0 when neither given nor defaulted
        test(('127.0.0.1', 0), '127.0.0.1')
        # an explicit port wins over the default
        test(('127.0.0.1', 10), '127.0.0.1:10', 500)
        test(('127.0.0.1', 500), '127.0.0.1', 500)
        # IPv6 addresses are bracketed
        test(('::1', 0), '[::1]')
        test(('::1', 10), '[::1]:10', 500)
        test(('::1', 500), '[::1]', 500)
        # the port may be a service name, resolved via getservbyname
        test(('::1', http_port), '[::1]:http')
        # IPv6 addresses are normalized
        test(('::1', 0), '[0::01]')
        # hostnames are resolved; 'localhost' may map to IPv4 or IPv6
        local_address = lambda port: (('127.0.0.1', port), ('::1', port))
        self.assertIn(parseNodeAddress('localhost'), local_address(0))
        self.assertIn(parseNodeAddress('localhost:10'), local_address(10))

    def testReadBufferRead(self):
        """ Append some chunks then consume the data """
        buf = ReadBuffer()
        self.assertEqual(len(buf), 0)
        buf.append('abc')
        self.assertEqual(len(buf), 3)
        # not enough data: read() returns None and consumes nothing
        self.assertEqual(buf.read(4), None)
        self.assertEqual(len(buf), 3)
        buf.append('def')
        # consume a part, spanning both appended chunks
        self.assertEqual(len(buf), 6)
        self.assertEqual(buf.read(4), 'abcd')
        self.assertEqual(len(buf), 2)
        # asking for more than what remains still fails, then consume the rest
        self.assertEqual(buf.read(3), None)
        self.assertEqual(buf.read(2), 'ef')
# Allow running this test module standalone from the command line.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/threaded/ 0000775 0000000 0000000 00000000000 12601037530 0024577 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/threaded/__init__.py 0000664 0000000 0000000 00000070507 12601037530 0026721 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2011-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# XXX: Consider using ClusterStates.STOPPING to stop clusters
import os, random, select, socket, sys, tempfile, threading, time, weakref
import traceback
from collections import deque
from ConfigParser import SafeConfigParser
from contextlib import contextmanager
from itertools import count
from functools import wraps
from zlib import decompress
from mock import Mock
import transaction, ZODB
import neo.admin.app, neo.master.app, neo.storage.app
import neo.client.app, neo.neoctl.app
from neo.client import Storage
from neo.lib import logging
from neo.lib.connection import BaseConnection, Connection
from neo.lib.connector import SocketConnector, \
ConnectorConnectionRefusedException
from neo.lib.locking import SimpleQueue
from neo.lib.protocol import CellStates, ClusterStates, NodeStates, NodeTypes
from neo.lib.util import cached_property, parseMasterList, p64
from .. import NeoTestBase, Patch, getTempDirectory, setupMySQLdb, \
ADDRESS_TYPE, IP_VERSION_FORMAT_DICT, DB_PREFIX, DB_USER
# Address test nodes bind to: the loopback address for the configured
# address family, port 0 (the kernel picks a free port).
BIND = IP_VERSION_FORMAT_DICT[ADDRESS_TYPE], 0
# Packed (binary) loopback address; ServerNode derives virtual IPs from it
# by replacing the last byte.
LOCAL_IP = socket.inet_pton(ADDRESS_TYPE, IP_VERSION_FORMAT_DICT[ADDRESS_TYPE])
class FairLock(deque):
    """A FIFO-fair replacement for threading.Lock.

    Unlike a plain threading.Lock, waiting threads are served in arrival
    order: each waiter enqueues a private one-shot lock and spins on the
    queue head, so a thread repeatedly fighting for the same resource in
    a loop cannot starve the others for long with bad luck.
    """
    def __enter__(self, _allocate_lock=threading.Lock):
        # Enqueue a private lock, held by ourselves, marking our turn.
        my_turn = _allocate_lock()
        my_turn.acquire()
        self.append(my_turn)
        # Wait until our lock reaches the head of the queue.
        head = self[0]
        while head is not my_turn:
            with head:
                head = self[0]

    def __exit__(self, t, v, tb):
        # Pop our lock and release it, waking the next queued waiter.
        self.popleft().release()
class Serialized(object):
"""
"Threaded" tests run all nodes in the same process as the test itself,
and threads are scheduled by this class, which mainly provides 2 features:
- more determinism, by minimizing the number of active threads, and
switching them in a round-robin;
- tic() method to wait only the necessary time for the cluster to be idle.
The basic concept is that each thread has a lock that always gets acquired
by itself. The following pattern is used to yield the processor to the next
thread:
release(); acquire()
It should be noted that this is not atomic, i.e. all other threads
sometimes complete before a thread tries to acquire its lock: in order that
the previous thread does not fail by releasing an un-acquired lock,
we actually use Semaphores instead of Locks.
The epoll object of each node is hooked so that thread switching happens
before polling for network activity. An extra epoll object is used to
detect which node has a readable epoll object.
"""
check_timeout = False
@classmethod
def init(cls):
cls._busy = set()
cls._busy_cond = threading.Condition(threading.Lock())
cls._epoll = select.epoll()
cls._pdb = None
cls._sched_lock = threading.Semaphore(0)
cls._tic_lock = FairLock()
cls._fd_dict = {}
@classmethod
def idle(cls, owner):
with cls._busy_cond:
cls._busy.discard(owner)
cls._busy_cond.notify_all()
@classmethod
def stop(cls):
assert not cls._fd_dict, cls._fd_dict
del(cls._busy, cls._busy_cond, cls._epoll, cls._fd_dict,
cls._pdb, cls._sched_lock, cls._tic_lock)
@classmethod
def _sort_key(cls, fd_event):
return -cls._fd_dict[fd_event[0]]._last
@classmethod
@contextmanager
def pdb(cls):
try:
cls._pdb = sys._getframe(2).f_trace.im_self
cls._pdb.set_continue()
except AttributeError:
pass
yield
p = cls._pdb
if p is not None:
cls._pdb = None
t = threading.currentThread()
p.stdout.write(getattr(t, 'node_name', t.name))
p.set_trace(sys._getframe(3))
@classmethod
def tic(cls, step=-1, check_timeout=()):
# If you're in a pdb here, 'n' switches to another thread
# (the following lines are not supposed to be debugged into)
with cls._tic_lock, cls.pdb():
f = sys._getframe(1)
try:
logging.info('tic (%s:%u) ...',
f.f_code.co_filename, f.f_lineno)
finally:
del f
if cls._busy:
with cls._busy_cond:
while cls._busy:
cls._busy_cond.wait()
for app in check_timeout:
app.em.epoll.check_timeout = True
app.em.wakeup()
del app
while step:
event_list = cls._epoll.poll(0)
if not event_list:
break
step -= 1
event_list.sort(key=cls._sort_key)
next_lock = cls._sched_lock
for fd, event in event_list:
self = cls._fd_dict[fd]
self._release_next = next_lock.release
next_lock = self._lock
del self
next_lock.release()
cls._sched_lock.acquire()
def __init__(self, app, busy=True):
self._epoll = app.em.epoll
app.em.epoll = self
# XXX: It may have been initialized before the SimpleQueue is patched.
thread_container = getattr(app, '_thread_container', None)
thread_container is None or thread_container.__init__()
if busy:
self._busy.add(self) # block tic until app waits for polling
def __getattr__(self, attr):
if attr in ('close', 'modify', 'register', 'unregister'):
return getattr(self._epoll, attr)
return self.__getattribute__(attr)
    def poll(self, timeout):
        """Scheduled replacement for epoll.poll().

        A blocking poll hands control back to the scheduler: release the
        pending `_release_next` callable (either the chain link installed
        by tic(), or — on the first blocking poll — the class method that
        performs initial registration), then block on our own lock until
        tic() schedules us again.
        """
        if self.check_timeout:
            # One real-timeout poll was requested via tic(check_timeout=...).
            assert timeout >= 0, (self, timeout)
            del self.check_timeout
        elif timeout:
            with self.pdb(): # same as in tic()
                release = self._release_next
                self._release_next = None
                release()
                self._lock.acquire()
                self._last = time.time()
        return self._epoll.poll(timeout)
    def _release_next(self):
        # Initial registration with the scheduler, run from the app thread
        # on its first blocking poll (see poll()).  Afterwards, tic()
        # shadows this method with an instance attribute holding the
        # previous chain link's `release`, so this body runs only once
        # per registration.
        self._last = time.time()
        self._lock = threading.Semaphore(0)
        fd = self._epoll.fileno()
        cls = self.__class__
        cls._fd_dict[fd] = self
        cls._epoll.register(fd)
        cls.idle(self)
    def exit(self):
        """Withdraw this epoll from the scheduler when the node stops."""
        fd = self._epoll.fileno()
        cls = self.__class__
        if cls._fd_dict.pop(fd, None) is None:
            # Never registered (or already removed): just report idle.
            cls.idle(self)
        else:
            cls._epoll.unregister(fd)
            # Keep the wake-up chain alive: this calls the instance
            # attribute installed by tic() (a lock release), not the
            # registration method of the same name.
            self._release_next()
class TestSerialized(Serialized):
    """Serialized epoll for the test side (e.g. NeoCTL connections).

    Registered with busy=False so it never blocks tic(); instead of
    sleeping in poll(), it single-steps the global scheduler until
    events are available.
    """

    def __init__(*args):
        Serialized.__init__(busy=False, *args)

    def poll(self, timeout):
        if timeout:
            # Busy-step the scheduler rather than blocking.
            while 1:
                r = self._epoll.poll(0)
                if r:
                    return r
                Serialized.tic(step=1)
        return self._epoll.poll(timeout)
class Node(object):
    """Mixin for test nodes providing connection lookup/filtering helpers."""

    def getConnectionList(self, *peers):
        """Yield this node's connections whose remote address matches a
        connector address of any of `peers`."""
        # For a server-side connector the peer address is `addr`; for a
        # client-side one it is getAddress().
        addr = lambda c: c and (c.addr if c.is_server else c.getAddress())
        addr_set = {addr(c.connector) for peer in peers
            for c in peer.em.connection_dict.itervalues()
            if isinstance(c, Connection)}
        addr_set.discard(None)
        return (c for c in self.em.connection_dict.itervalues()
            if isinstance(c, Connection) and addr(c.connector) in addr_set)

    def filterConnection(self, *peers):
        """Return a ConnectionFilter context manager over the connections
        between this node and `peers`."""
        return ConnectionFilter(self.getConnectionList(*peers))
class ServerNode(Node):
    """Base class running a NEO server application in its own thread.

    Subclasses (MasterApplication, StorageApplication, AdminApplication)
    are given virtual addresses (one fake IP per node type, the 'port'
    being an index into _node_list) which resolv() maps to the real
    listening addresses.
    """

    _server_class_dict = {}  # virtual IP -> ServerNode subclass

    class __metaclass__(type):
        def __init__(cls, name, bases, d):
            # For direct subclasses of ServerNode (e.g. MasterApplication):
            # inject threading.Thread into the bases and allocate a unique
            # virtual IP derived from the class name ('...Application').
            if Node not in bases and threading.Thread not in cls.__mro__:
                cls.__bases__ = bases + (threading.Thread,)
                cls.node_type = getattr(NodeTypes, name[:-11].upper())
                cls._node_list = []
                cls._virtual_ip = socket.inet_ntop(ADDRESS_TYPE,
                    LOCAL_IP[:-1] + chr(2 + len(cls._server_class_dict)))
                cls._server_class_dict[cls._virtual_ip] = cls

    @staticmethod
    def resetPorts():
        # Forget all allocated virtual ports (called between tests).
        for cls in ServerNode._server_class_dict.itervalues():
            del cls._node_list[:]

    @classmethod
    def newAddress(cls):
        # Allocate the next virtual (ip, port) pair for this node type.
        address = cls._virtual_ip, len(cls._node_list)
        cls._node_list.append(None)
        return address

    @classmethod
    def resolv(cls, address):
        """Map a virtual address to the node's real listening address;
        non-virtual addresses are returned unchanged."""
        try:
            cls = cls._server_class_dict[address[0]]
        except KeyError:
            return address
        return cls._node_list[address[1]].getListeningAddress()

    def __init__(self, cluster=None, address=None, **kw):
        """Create (or re-create, see resetNode) the threaded node.

        Configuration is taken from `cluster` unless overridden in `kw`;
        the application itself is configured through a Mock config object.
        """
        if not address:
            address = self.newAddress()
        if cluster is None:
            master_nodes = kw['master_nodes']
            name = kw['name']
        else:
            master_nodes = kw.get('master_nodes', cluster.master_nodes)
            name = kw.get('name', cluster.name)
        port = address[1]
        # weakref.proxy so a dead node does not keep itself alive via
        # the class-level _node_list.
        self._node_list[port] = weakref.proxy(self)
        # Remember constructor arguments so resetNode() can re-init.
        self._init_args = init_args = kw.copy()
        init_args['cluster'] = cluster
        init_args['address'] = address
        threading.Thread.__init__(self)
        self.daemon = True
        self.node_name = '%s_%u' % (self.node_type, port)
        kw.update(getCluster=name, getBind=address,
                  getMasters=parseMasterList(master_nodes, address))
        super(ServerNode, self).__init__(Mock(kw))

    def getVirtualAddress(self):
        return self._init_args['address']

    def resetNode(self):
        # Re-initialize the application in place, keeping its address.
        assert not self.is_alive()
        kw = self._init_args
        self.close()
        self.__init__(**kw)

    def start(self):
        # Wrap the app's epoll before the thread runs its event loop.
        Serialized(self)
        threading.Thread.start(self)

    def run(self):
        try:
            super(ServerNode, self).run()
        finally:
            self._afterRun()
            logging.debug('stopping %r', self)
            self.em.epoll.exit()

    def _afterRun(self):
        try:
            self.listening_conn.close()
        except AttributeError:
            pass

    def getListeningAddress(self):
        try:
            return self.listening_conn.getAddress()
        except AttributeError:
            # Not listening (yet/anymore): behave like a refused connection.
            raise ConnectorConnectionRefusedException
class AdminApplication(ServerNode, neo.admin.app.Application):
    # Threaded admin node for tests; behavior comes entirely from the bases.
    pass
class MasterApplication(ServerNode, neo.master.app.Application):
    # Threaded master node for tests; behavior comes entirely from the bases.
    pass
class StorageApplication(ServerNode, neo.storage.app.Application):
    """Threaded storage node, with direct access to its database for
    white-box checks in tests."""

    # Placeholder with a no-op close(), replaced by the real database
    # manager once the application runs.
    dm = type('', (), {'close': lambda self: None})()

    def resetNode(self, clear_database=False):
        self._init_args['getReset'] = clear_database
        super(StorageApplication, self).resetNode()

    def _afterRun(self):
        super(StorageApplication, self)._afterRun()
        try:
            self.dm.close()
            del self.dm
        except StandardError: # AttributeError & ProgrammingError
            pass

    def getAdapter(self):
        # Database adapter name ('SQLite', 'MySQL', ...) used at creation.
        return self._init_args['getAdapter']

    def switchTables(self):
        # Swap committed and temporary tables, to simulate unfinished
        # transactions that verification must recover.
        q = self.dm.query
        for table in 'trans', 'obj':
            q('ALTER TABLE %s RENAME TO tmp' % table)
            q('ALTER TABLE t%s RENAME TO %s' % (table, table))
            q('ALTER TABLE tmp RENAME TO t%s' % table)

    def getDataLockInfo(self):
        """Return {(hash, compression): refcount-of-uncommitted-uses} for
        every row of the data table."""
        dm = self.dm
        index = tuple(dm.query("SELECT id, hash, compression FROM data"))
        assert set(dm._uncommitted_data).issubset(x[0] for x in index)
        get = dm._uncommitted_data.get
        # c & 0x7f drops the flag bit of the compression column.
        return {(str(h), c & 0x7f): get(i, 0) for i, h, c in index}

    def sqlCount(self, table):
        # Row count of `table` (table name comes from tests, not user input).
        (r,), = self.dm.query("SELECT COUNT(*) FROM " + table)
        return r
class ClientApplication(Node, neo.client.app.Application):
    """Threaded NEO client whose polling is driven by Serialized."""

    def __init__(self, master_nodes, name, **kw):
        super(ClientApplication, self).__init__(master_nodes, name, **kw)
        # Name the poll thread so LoggerThreadName resolves it in logs.
        self.poll_thread.node_name = name

    def _run(self):
        try:
            super(ClientApplication, self)._run()
        finally:
            self.em.epoll.exit()

    def start(self):
        # Wrap the epoll once; start() may be called more than once.
        isinstance(self.em.epoll, Serialized) or Serialized(self)
        super(ClientApplication, self).start()

    def getConnectionList(self, *peers):
        # Unlike Node.getConnectionList, resolve through the client's own
        # connection pool / master connection.
        for peer in peers:
            if isinstance(peer, MasterApplication):
                conn = self._getMasterConnection()
            else:
                assert isinstance(peer, StorageApplication)
                conn = self.cp.getConnForNode(self.nm.getByUUID(peer.uuid))
            yield conn
class NeoCTL(neo.neoctl.app.NeoCTL):
    """neoctl whose epoll is a TestSerialized, so its synchronous calls
    single-step the scheduler instead of blocking."""

    def __init__(self, *args, **kw):
        super(NeoCTL, self).__init__(*args, **kw)
        TestSerialized(self)
class LoggerThreadName(str):
    """String whose value follows the current thread.

    str(instance) returns the current thread's `node_name` if set, else
    the default given at construction time; all attribute access is
    delegated to that dynamic value, so logging shows which node emitted
    each record.
    """

    def __new__(cls, default='TEST'):
        return str.__new__(cls, default)

    def __getattribute__(self, attr):
        # Delegate everything to the *current* value of the string.
        return getattr(str(self), attr)

    def __hash__(self):
        # Identity hash: the dynamic value must not affect dict lookups.
        return id(self)

    def __str__(self):
        try:
            # current_thread() instead of the camelCase alias
            # currentThread(), which is deprecated since Python 3.10 and
            # removed in 3.12 (both exist since Python 2.6).
            return threading.current_thread().node_name
        except AttributeError:
            return str.__str__(self)
class ConnectionFilter(object):
    """Context manager intercepting outgoing packets for tests.

    While at least one filter is active, Connection._addPacket is
    monkey-patched: a packet matching any filter is queued (per
    connection) instead of being sent; queued packets are retried when
    filters are removed.
    """

    filtered_count = 0
    filter_list = []    # class-level stack of active ConnectionFilters
    filter_queue = weakref.WeakKeyDictionary()  # conn -> deque of held packets
    lock = threading.Lock()
    _addPacket = Connection._addPacket  # original, unpatched method

    @contextmanager
    def __new__(cls, conn_list=()):
        # NOTE: __new__ is itself the context manager, so instances are
        # only usable inside a `with` statement.
        self = object.__new__(cls)
        self.filter_dict = {}   # filter callable -> patches applied with it
        self.conn_list = frozenset(conn_list)
        if not cls.filter_list:
            # First active filter: install the interception hook.
            def _addPacket(conn, packet):
                with cls.lock:
                    try:
                        queue = cls.filter_queue[conn]
                    except KeyError:
                        # No packet held yet for this connection: only
                        # queue if some filter matches.
                        for self in cls.filter_list:
                            if self(conn, packet):
                                self.filtered_count += 1
                                break
                        else:
                            return cls._addPacket(conn, packet)
                        cls.filter_queue[conn] = queue = deque()
                    # Queue a copy, to keep ordering with already-held
                    # packets on this connection.
                    p = packet.__new__(packet.__class__)
                    p.__dict__.update(packet.__dict__)
                    queue.append(p)
            Connection._addPacket = _addPacket
        try:
            cls.filter_list.append(self)
            yield self
        finally:
            del cls.filter_list[-1:]
            if not cls.filter_list:
                # Last filter gone: restore the original method (im_func
                # unwraps the Py2 unbound method).
                Connection._addPacket = cls._addPacket.im_func
            with cls.lock:
                cls._retry()

    def __call__(self, conn, packet):
        """Return True if this filter holds `packet` on `conn`."""
        if not self.conn_list or conn in self.conn_list:
            for filter in self.filter_dict:
                if filter(conn, packet):
                    return True
        return False

    @classmethod
    def _retry(cls):
        # Flush queued packets that no active filter matches anymore,
        # preserving per-connection order: stop at the first packet that
        # is still held.
        for conn, queue in cls.filter_queue.items():
            while queue:
                packet = queue.popleft()
                for self in cls.filter_list:
                    if self(conn, packet):
                        queue.appendleft(packet)
                        break
                else:
                    cls._addPacket(conn, packet)
                    continue
                break
            else:
                del cls.filter_queue[conn]

    def add(self, filter, *patches):
        """Activate `filter`; `patches` are applied now and tracked with it."""
        with self.lock:
            self.filter_dict[filter] = patches
            for p in patches:
                p.apply()

    def remove(self, *filters):
        """Deactivate `filters` and retry the packets they were holding."""
        with self.lock:
            for filter in filters:
                del self.filter_dict[filter]
            self._retry()

    def discard(self, *filters):
        # Like remove(), but ignore filters that are not active.
        try:
            self.remove(*filters)
        except KeyError:
            pass

    def __contains__(self, filter):
        return filter in self.filter_dict
class NEOCluster(object):
    """A whole in-process NEO cluster (masters, storages, admin, client),
    each node running in its own thread under the Serialized scheduler."""

    def __init__(orig, self): # temporary definition for SimpleQueue patch
        # Replacement SimpleQueue.__init__: a blocking lock acquisition
        # single-steps the scheduler instead of deadlocking the test.
        orig(self)
        lock = self._lock
        def _lock(blocking=True):
            if blocking:
                while not lock(False):
                    Serialized.tic(step=1)
                return True
            return lock(False)
        self._lock = _lock
    # Global patches applied while at least one cluster runs: no
    # connection timeouts, no connect throttling, and virtual-address
    # binding/resolution through ServerNode.
    _patches = (
        Patch(BaseConnection, getTimeout=lambda orig, self: None),
        Patch(SimpleQueue, __init__=__init__),
        Patch(SocketConnector, CONNECT_LIMIT=0),
        Patch(SocketConnector, _bind=lambda orig, self, addr: orig(self, BIND)),
        Patch(SocketConnector, _connect = lambda orig, self, addr:
            orig(self, ServerNode.resolv(addr))))
    _patch_count = 0
    _resource_dict = weakref.WeakValueDictionary()

    def _allocate(self, resource, new):
        # Draw values from `new` until one is free among live clusters.
        result = resource, new()
        while result in self._resource_dict:
            result = resource, new()
        self._resource_dict[result] = self
        return result[1]

    @staticmethod
    def _patch():
        # Reference-counted application of the global patches.
        cls = NEOCluster
        cls._patch_count += 1
        if cls._patch_count > 1:
            return
        for patch in cls._patches:
            patch.apply()
        Serialized.init()

    @staticmethod
    def _unpatch():
        # Reference-counted revert; tears down the scheduler with the
        # last running cluster.
        cls = NEOCluster
        assert cls._patch_count > 0
        cls._patch_count -= 1
        if cls._patch_count:
            return
        for patch in cls._patches:
            patch.revert()
        Serialized.stop()

    def __init__(self, master_count=1, partitions=1, replicas=0, upstream=None,
                       adapter=os.getenv('NEO_TESTS_ADAPTER', 'SQLite'),
                       storage_count=None, db_list=None, clear_databases=True,
                       db_user=DB_USER, db_password='', compress=True,
                       importer=None, autostart=None):
        """Build all node applications (without starting their threads).

        `upstream` turns this cluster into a backup of another one;
        `importer` configures the Importer storage backend.
        """
        self.name = 'neo_%s' % self._allocate('name',
            lambda: random.randint(0, 100))
        self.compress = compress
        master_list = [MasterApplication.newAddress()
                       for _ in xrange(master_count)]
        self.master_nodes = ' '.join('%s:%s' % x for x in master_list)
        # weakref.proxy: nodes must not keep the cluster alive.
        weak_self = weakref.proxy(self)
        kw = dict(cluster=weak_self, getReplicas=replicas, getAdapter=adapter,
                  getPartitions=partitions, getReset=clear_databases)
        if upstream is not None:
            self.upstream = weakref.proxy(upstream)
            kw.update(getUpstreamCluster=upstream.name,
                getUpstreamMasters=parseMasterList(upstream.master_nodes))
        self.master_list = [MasterApplication(getAutostart=autostart,
                                              address=x, **kw)
                            for x in master_list]
        if db_list is None:
            if storage_count is None:
                storage_count = replicas + 1
            index = count().next
            db_list = ['%s%u' % (DB_PREFIX, self._allocate('db', index))
                       for _ in xrange(storage_count)]
        if adapter == 'MySQL':
            setupMySQLdb(db_list, db_user, db_password, clear_databases)
            db = '%s:%s@%%s' % (db_user, db_password)
        elif adapter == 'SQLite':
            db = os.path.join(getTempDirectory(), '%s.sqlite')
        else:
            assert False, adapter
        if importer:
            # Generate an Importer configuration file wrapping the real
            # database and the source ZODBs.
            cfg = SafeConfigParser()
            cfg.add_section("neo")
            cfg.set("neo", "adapter", adapter)
            cfg.set("neo", "database", db % tuple(db_list))
            for name, zodb in importer:
                cfg.add_section(name)
                for x in zodb.iteritems():
                    cfg.set(name, *x)
            db = os.path.join(getTempDirectory(), '%s.conf')
            with open(db % tuple(db_list), "w") as f:
                cfg.write(f)
            kw["getAdapter"] = "Importer"
        self.storage_list = [StorageApplication(getDatabase=db % x, **kw)
                             for x in db_list]
        self.admin_list = [AdminApplication(**kw)]
        self.neoctl = NeoCTL(self.admin.getVirtualAddress())

    def __repr__(self):
        return "<%s(%s) at 0x%x>" % (self.__class__.__name__,
                                     self.name, id(self))

    # A few shortcuts that work when there's only 1 master/storage/admin
    @property
    def master(self):
        master, = self.master_list
        return master
    @property
    def storage(self):
        storage, = self.storage_list
        return storage
    @property
    def admin(self):
        admin, = self.admin_list
        return admin
    ###

    @property
    def primary_master(self):
        master, = [master for master in self.master_list if master.primary]
        return master

    def reset(self, clear_database=False):
        """Re-initialize every node (and neoctl) after a stop()."""
        for node_type in 'master', 'storage', 'admin':
            kw = {}
            if node_type == 'storage':
                kw['clear_database'] = clear_database
            for node in getattr(self, node_type + '_list'):
                node.resetNode(**kw)
        self.neoctl.close()
        self.neoctl = NeoCTL(self.admin.getVirtualAddress())

    def start(self, storage_list=None, fast_startup=False):
        """Start node threads and bring the cluster to a running state.

        `fast_startup` asks the master to start before storages connect.
        """
        self._patch()
        for node_type in 'master', 'admin':
            for node in getattr(self, node_type + '_list'):
                node.start()
        Serialized.tic()
        if fast_startup:
            self.startCluster()
        if storage_list is None:
            storage_list = self.storage_list
        for node in storage_list:
            node.start()
        Serialized.tic()
        if not fast_startup:
            self.startCluster()
            Serialized.tic()
        state = self.neoctl.getClusterState()
        assert state in (ClusterStates.RUNNING, ClusterStates.BACKINGUP), state
        self.enableStorageList(storage_list)

    @cached_property
    def client(self):
        client = ClientApplication(name=self.name,
            master_nodes=self.master_nodes, compress=self.compress)
        # Make sure client won't be reused after it was closed.
        def close():
            client = self.client
            del self.client, client.close
            client.close()
        client.close = close
        return client

    @cached_property
    def db(self):
        # Lazily-opened ZODB database on top of this cluster.
        return ZODB.DB(storage=self.getZODBStorage())

    def startCluster(self):
        # Ask the master to start; tolerate a RuntimeError if the cluster
        # already left the waiting state by itself.
        try:
            self.neoctl.startCluster()
        except RuntimeError:
            Serialized.tic()
            if self.neoctl.getClusterState() not in (
                      ClusterStates.BACKINGUP,
                      ClusterStates.RUNNING,
                      ClusterStates.VERIFYING,
                  ):
                raise

    def enableStorageList(self, storage_list):
        self.neoctl.enableStorageList([x.uuid for x in storage_list])
        Serialized.tic()
        for node in storage_list:
            assert self.getNodeState(node) == NodeStates.RUNNING

    def join(self, thread_list, timeout=5):
        # Step the scheduler until all given threads terminate (or the
        # wall-clock timeout elapses, which fails the assertion).
        timeout += time.time()
        while thread_list:
            assert time.time() < timeout
            Serialized.tic()
            thread_list = [t for t in thread_list if t.is_alive()]

    def stop(self):
        """Close the client/DB, stop every node thread and revert patches."""
        logging.debug("stopping %s", self)
        client = self.__dict__.get("client")
        # Closing the DB (if opened) also closes the client storage.
        client is None or self.__dict__.pop("db", client).close()
        node_list = self.admin_list + self.storage_list + self.master_list
        for node in node_list:
            node.em.wakeup(True)
        try:
            node_list.append(client.poll_thread)
        except AttributeError: # client is None or thread is already stopped
            pass
        self.join(node_list)
        logging.debug("stopped %s", self)
        self._unpatch()

    def getNodeState(self, node):
        # Look the node up in neoctl's list; node rows are tuples where
        # index 2 is the uuid and index 3 the state.
        uuid = node.uuid
        for node in self.neoctl.getNodeList(node.node_type):
            if node[2] == uuid:
                return node[3]

    def getOudatedCells(self):
        # (sic: historical typo kept, callers use this name)
        return [cell for row in self.neoctl.getPartitionRowList()[1]
                     for cell in row[1]
                     if cell[1] == CellStates.OUT_OF_DATE]

    def getZODBStorage(self, **kw):
        return Storage.Storage(None, self.name, _app=self.client, **kw)

    def importZODB(self, dummy_zodb=None, random=random):
        """Return a callable importing `count` transactions of generated
        ZODB data into this cluster."""
        if dummy_zodb is None:
            from ..stat_zodb import PROD1
            dummy_zodb = PROD1(random)
        preindex = {}
        as_storage = dummy_zodb.as_storage
        return lambda count: self.getZODBStorage().importFrom(
            as_storage(count), preindex=preindex)

    def populate(self, transaction_list, tid=lambda i: p64(i+1),
                                         oid=lambda i: p64(i+1)):
        """Commit one transaction per item of `transaction_list`, each
        item being the sequence of object ids (ints) it modifies."""
        storage = self.getZODBStorage()
        tid_dict = {}
        for i, oid_list in enumerate(transaction_list):
            txn = transaction.Transaction()
            storage.tpc_begin(txn, tid(i))
            for o in oid_list:
                storage.store(oid(o), tid_dict.get(o), repr((i, o)), '', txn)
            storage.tpc_vote(txn)
            i = storage.tpc_finish(txn)
            for o in oid_list:
                tid_dict[o] = i

    def getTransaction(self):
        # Return a fresh (transaction manager, ZODB connection) pair.
        txn = transaction.TransactionManager()
        return txn, self.db.open(transaction_manager=txn)

    def __del__(self, __print_exc=traceback.print_exc):
        # Default arg keeps print_exc reachable during interpreter exit.
        try:
            self.neoctl.close()
            for node_type in 'admin', 'storage', 'master':
                for node in getattr(self, node_type + '_list'):
                    node.close()
        except:
            __print_exc()
            raise

    def extraCellSortKey(self, key):
        # Patch the client's cell sort key with `key` as tie-breaker.
        return Patch(self.client.cp, getCellSortKey=lambda orig, cell:
            (orig(cell), key(cell)))
class NEOThreadedTest(NeoTestBase):
    """Base class for threaded NEO tests."""

    def setupLog(self):
        # Per-test log file; the returned LoggerThreadName resolves to
        # the emitting node's name at format time.
        log_file = os.path.join(getTempDirectory(), self.id() + '.log')
        logging.setup(log_file)
        return LoggerThreadName()

    def _tearDown(self, success):
        super(NEOThreadedTest, self)._tearDown(success)
        ServerNode.resetPorts()
        if success:
            # Shrink the packet log of successful tests.
            with logging as db:
                db.execute("UPDATE packet SET body=NULL")
                db.execute("VACUUM")

    # Shortcut so tests can write self.tic().
    tic = Serialized.tic

    def getUnpickler(self, conn):
        """Return a function turning raw object data from `conn` into a
        loaded ghost object (optionally zlib-decompressing it first)."""
        reader = conn._reader
        def unpickler(data, compression=False):
            if compression:
                data = decompress(data)
            obj = reader.getGhost(data)
            reader.setGhostState(obj, data)
            return obj
        return unpickler

    class newThread(threading.Thread):
        """Daemon thread running `func(*args, **kw)` immediately, and
        re-raising any exception in the joining thread."""

        def __init__(self, func, *args, **kw):
            threading.Thread.__init__(self)
            self.__target = func, args, kw
            self.daemon = True
            self.start()

        def run(self):
            try:
                apply(*self.__target)  # Py2 apply(func, args, kw)
                self.__exc_info = None
            except:
                self.__exc_info = sys.exc_info()

        def join(self, timeout=None):
            threading.Thread.join(self, timeout)
            if not self.is_alive() and self.__exc_info:
                etype, value, tb = self.__exc_info
                del self.__exc_info
                # Py2 three-expression raise: preserve the traceback.
                raise etype, value, tb
def predictable_random(seed=None):
    """Decorator factory making randomized NEO components deterministic.

    While the wrapped test runs, the `random` modules used by master
    administration, backup and storage replication are replaced by a
    seeded random.Random (the seed is logged so failures can be replayed).
    """
    # Because we have 2 running threads when client works, we can't
    # patch neo.client.pool (and cluster should have 1 storage).
    from neo.master import backup_app
    from neo.master.handlers import administration
    from neo.storage import replicator
    def decorator(wrapped):
        def wrapper(*args, **kw):
            s = repr(time.time()) if seed is None else seed
            logging.info("using seed %r", s)
            r = random.Random(s)
            try:
                administration.random = backup_app.random = replicator.random \
                    = r
                return wrapped(*args, **kw)
            finally:
                # Always restore the real random module.
                administration.random = backup_app.random = replicator.random \
                    = random
        return wraps(wrapped)(wrapper)
    return decorator
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/threaded/test.py 0000664 0000000 0000000 00000076751 12601037530 0026150 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2011-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import os
import sys
import threading
import transaction
import unittest
from thread import get_ident
from zlib import compress
from persistent import Persistent
from ZODB import POSException
from neo.storage.transactions import TransactionManager, \
DelayedError, ConflictError
from neo.lib.connection import ConnectionClosed, MTClientConnection
from neo.lib.protocol import CellStates, ClusterStates, NodeStates, Packets, \
ZERO_TID
from .. import expectedFailure, _UnexpectedSuccess, Patch
from . import ClientApplication, NEOCluster, NEOThreadedTest
from neo.lib.util import add64, makeChecksum
from neo.client.exception import NEOStorageError
from neo.client.pool import CELL_CONNECTED, CELL_GOOD
class PCounter(Persistent):
    """Minimal persistent object holding an integer `value` (default 0)."""
    value = 0
class PCounterWithResolution(PCounter):
    """PCounter that resolves write conflicts by merging increments."""

    def _p_resolveConflict(self, old, saved, new):
        # Replay the concurrently committed increment (saved - old) on
        # top of our own pending state.
        delta = saved['value'] - old.get('value', 0)
        new['value'] += delta
        return new
class Test(NEOThreadedTest):
def testBasicStore(self):
cluster = NEOCluster()
try:
cluster.start()
storage = cluster.getZODBStorage()
data_info = {}
compressible = 'x' * 20
compressed = compress(compressible)
oid_list = []
if cluster.storage.getAdapter() == 'SQLite':
big = None
data = 'foo', '', 'foo', compressed, compressible
else:
big = os.urandom(65536) * 600
assert len(big) < len(compress(big))
data = ('foo', big, '', 'foo', big[:2**24-1], big,
compressed, compressible, big[:2**24])
self.assertFalse(cluster.storage.sqlCount('bigdata'))
self.assertFalse(cluster.storage.sqlCount('data'))
for data in data:
if data is compressible:
key = makeChecksum(compressed), 1
else:
key = makeChecksum(data), 0
oid = storage.new_oid()
txn = transaction.Transaction()
storage.tpc_begin(txn)
r1 = storage.store(oid, None, data, '', txn)
r2 = storage.tpc_vote(txn)
data_info[key] = 1
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
serial = storage.tpc_finish(txn)
data_info[key] = 0
self.tic()
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
self.assertEqual((data, serial), storage.load(oid, ''))
storage._cache.clear()
self.assertEqual((data, serial), storage.load(oid, ''))
self.assertEqual((data, serial), storage.load(oid, ''))
oid_list.append((oid, data, serial))
if big:
self.assertTrue(cluster.storage.sqlCount('bigdata'))
self.assertTrue(cluster.storage.sqlCount('data'))
for i, (oid, data, serial) in enumerate(oid_list, 1):
storage._cache.clear()
cluster.storage.dm.deleteObject(oid)
self.assertRaises(POSException.POSKeyError,
storage.load, oid, '')
for oid, data, serial in oid_list[i:]:
self.assertEqual((data, serial), storage.load(oid, ''))
if big:
self.assertFalse(cluster.storage.sqlCount('bigdata'))
self.assertFalse(cluster.storage.sqlCount('data'))
finally:
cluster.stop()
def testDeleteObject(self):
cluster = NEOCluster()
try:
cluster.start()
storage = cluster.getZODBStorage()
for clear_cache in 0, 1:
for tst in 'a.', 'bcd.':
oid = storage.new_oid()
serial = None
for data in tst:
txn = transaction.Transaction()
storage.tpc_begin(txn)
if data == '.':
storage.deleteObject(oid, serial, txn)
else:
storage.store(oid, serial, data, '', txn)
storage.tpc_vote(txn)
serial = storage.tpc_finish(txn)
if clear_cache:
storage._cache.clear()
self.assertRaises(POSException.POSKeyError,
storage.load, oid, '')
finally:
cluster.stop()
def testCreationUndoneHistory(self):
cluster = NEOCluster()
try:
cluster.start()
storage = cluster.getZODBStorage()
oid = storage.new_oid()
txn = transaction.Transaction()
storage.tpc_begin(txn)
storage.store(oid, None, 'foo', '', txn)
storage.tpc_vote(txn)
tid1 = storage.tpc_finish(txn)
storage.tpc_begin(txn)
storage.undo(tid1, txn)
tid2 = storage.tpc_finish(txn)
storage.tpc_begin(txn)
storage.undo(tid2, txn)
tid3 = storage.tpc_finish(txn)
expected = [(tid1, 3), (tid2, 0), (tid3, 3)]
for x in storage.history(oid, 10):
self.assertEqual((x['tid'], x['size']), expected.pop())
self.assertFalse(expected)
finally:
cluster.stop()
def testStorageDataLock(self):
cluster = NEOCluster()
try:
cluster.start()
storage = cluster.getZODBStorage()
data_info = {}
data = 'foo'
key = makeChecksum(data), 0
oid = storage.new_oid()
txn = transaction.Transaction()
storage.tpc_begin(txn)
r1 = storage.store(oid, None, data, '', txn)
r2 = storage.tpc_vote(txn)
tid = storage.tpc_finish(txn)
data_info[key] = 0
storage.sync()
txn = [transaction.Transaction() for x in xrange(3)]
for t in txn:
storage.tpc_begin(t)
storage.store(tid and oid or storage.new_oid(),
tid, data, '', t)
tid = None
for t in txn:
storage.tpc_vote(t)
data_info[key] = 3
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
storage.tpc_abort(txn[1])
storage.sync()
data_info[key] -= 1
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
tid1 = storage.tpc_finish(txn[2])
self.tic()
data_info[key] -= 1
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
storage.tpc_abort(txn[0])
storage.sync()
data_info[key] -= 1
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
finally:
cluster.stop()
def testDelayedUnlockInformation(self):
except_list = []
def delayUnlockInformation(conn, packet):
return isinstance(packet, Packets.NotifyUnlockInformation)
def onStoreObject(orig, tm, ttid, serial, oid, *args):
if oid == resume_oid and delayUnlockInformation in m2s:
m2s.remove(delayUnlockInformation)
try:
return orig(tm, ttid, serial, oid, *args)
except Exception, e:
except_list.append(e.__class__)
raise
cluster = NEOCluster(storage_count=1)
try:
cluster.start()
t, c = cluster.getTransaction()
c.root()[0] = ob = PCounter()
with cluster.master.filterConnection(cluster.storage) as m2s:
resume_oid = None
m2s.add(delayUnlockInformation,
Patch(TransactionManager, storeObject=onStoreObject))
t.commit()
resume_oid = ob._p_oid
ob._p_changed = 1
t.commit()
self.assertFalse(delayUnlockInformation in m2s)
finally:
cluster.stop()
self.assertEqual(except_list, [DelayedError])
def _testDeadlockAvoidance(self, scenario):
except_list = []
delay = threading.Event(), threading.Event()
ident = get_ident()
def onStoreObject(orig, tm, ttid, serial, oid, *args):
if oid == counter_oid:
scenario[1] -= 1
if not scenario[1]:
delay[0].set()
try:
return orig(tm, ttid, serial, oid, *args)
except Exception, e:
except_list.append(e.__class__)
raise
def onAsk(orig, conn, packet, *args, **kw):
c2 = get_ident() == ident
switch = isinstance(packet, Packets.AskBeginTransaction)
if switch:
if c2:
delay[1].wait()
elif isinstance(packet, (Packets.AskStoreObject,
Packets.AskFinishTransaction)):
delay[c2].wait()
scenario[0] -= 1
switch = not scenario[0]
try:
return orig(conn, packet, *args, **kw)
finally:
if switch:
delay[c2].clear()
delay[1-c2].set()
cluster = NEOCluster(storage_count=2, replicas=1)
try:
cluster.start()
t, c = cluster.getTransaction()
c.root()[0] = ob = PCounterWithResolution()
t.commit()
counter_oid = ob._p_oid
del ob, t, c
t1, c1 = cluster.getTransaction()
t2, c2 = cluster.getTransaction()
o1 = c1.root()[0]
o2 = c2.root()[0]
o1.value += 1
o2.value += 2
with Patch(TransactionManager, storeObject=onStoreObject), \
Patch(MTClientConnection, ask=onAsk):
t = self.newThread(t1.commit)
t2.commit()
t.join()
t1.begin()
t2.begin()
self.assertEqual(o1.value, 3)
self.assertEqual(o2.value, 3)
finally:
cluster.stop()
return except_list
def testDelayedStore(self):
# 0: C1 -> S1, S2
# 1: C2 -> S1, S2 (delayed)
# 2: C1 commits
# 3: C2 resolves conflict
self.assertEqual(self._testDeadlockAvoidance([2, 4]),
[DelayedError, DelayedError, ConflictError, ConflictError])
@expectedFailure(POSException.ConflictError)
def testDeadlockAvoidance(self):
# This test fail because deadlock avoidance is not fully implemented.
# 0: C1 -> S1
# 1: C2 -> S1, S2 (delayed)
# 2: C1 -> S2 (deadlock)
# 3: C2 commits
# 4: C1 resolves conflict
self.assertEqual(self._testDeadlockAvoidance([1, 3]),
[DelayedError, ConflictError, "???" ])
def testConflictResolutionTriggered2(self):
""" Check that conflict resolution works """
cluster = NEOCluster()
try:
cluster.start()
# create the initial object
t, c = cluster.getTransaction()
c.root()['with_resolution'] = ob = PCounterWithResolution()
t.commit()
self.assertEqual(ob._p_changed, 0)
oid = ob._p_oid
tid1 = ob._p_serial
self.assertNotEqual(tid1, ZERO_TID)
del ob, t, c
# then check resolution
t1, c1 = cluster.getTransaction()
t2, c2 = cluster.getTransaction()
o1 = c1.root()['with_resolution']
o2 = c2.root()['with_resolution']
self.assertEqual(o1.value, 0)
self.assertEqual(o2.value, 0)
o1.value += 1
o2.value += 2
t1.commit()
self.assertEqual(o1._p_changed, 0)
tid2 = o1._p_serial
self.assertTrue(tid1 < tid2)
self.assertEqual(o1.value, 1)
self.assertEqual(o2.value, 2)
t2.commit()
self.assertEqual(o2._p_changed, None)
t1.begin()
t2.begin()
self.assertEqual(o2.value, 3)
self.assertEqual(o1.value, 3)
tid3 = o1._p_serial
self.assertTrue(tid2 < tid3)
self.assertEqual(tid3, o2._p_serial)
# check history
history = c1.db().history
self.assertEqual([x['tid'] for x in history(oid, size=1)], [tid3])
self.assertEqual([x['tid'] for x in history(oid, size=10)],
[tid3, tid2, tid1])
finally:
cluster.stop()
def test_notifyNodeInformation(self):
# translated from MasterNotificationsHandlerTests
# (neo.tests.client.testMasterHandler)
cluster = NEOCluster(replicas=1)
try:
cluster.start()
cluster.db # open DB
s0, s1 = cluster.client.nm.getStorageList()
conn = s0.getConnection()
self.assertFalse(conn.isClosed())
getCellSortKey = cluster.client.cp.getCellSortKey
self.assertEqual(getCellSortKey(s0), CELL_CONNECTED)
cluster.neoctl.dropNode(s0.getUUID())
self.assertEqual([s1], cluster.client.nm.getStorageList())
self.assertTrue(conn.isClosed())
self.assertEqual(getCellSortKey(s0), CELL_GOOD)
# XXX: the test originally checked that 'unregister' method
# was called (even if it's useless in this case),
# but we would need an API to do that easily.
self.assertFalse(cluster.client.dispatcher.registered(conn))
finally:
cluster.stop()
def testRestartWithMissingStorage(self):
# translated from neo.tests.functional.testStorage.StorageTest
cluster = NEOCluster(replicas=1, partitions=10)
s1, s2 = cluster.storage_list
try:
cluster.start()
self.assertEqual([], cluster.getOudatedCells())
finally:
cluster.stop()
# restart it with one storage only
cluster.reset()
try:
cluster.start(storage_list=(s1,))
self.assertEqual(NodeStates.UNKNOWN, cluster.getNodeState(s2))
finally:
cluster.stop()
def testVerificationCommitUnfinishedTransactions(self):
""" Verification step should commit unfinished transactions """
# translated from neo.tests.functional.testCluster.ClusterTests
cluster = NEOCluster()
try:
cluster.start()
t, c = cluster.getTransaction()
c.root()[0] = 'ok'
t.commit()
self.tic()
data_info = cluster.storage.getDataLockInfo()
self.assertEqual(data_info.values(), [0, 0])
# (obj|trans) become t(obj|trans)
cluster.storage.switchTables()
finally:
cluster.stop()
cluster.reset()
self.assertEqual(dict.fromkeys(data_info, 1),
cluster.storage.getDataLockInfo())
try:
cluster.start()
t, c = cluster.getTransaction()
# transaction should be verified and commited
self.assertEqual(c.root()[0], 'ok')
self.assertEqual(data_info, cluster.storage.getDataLockInfo())
finally:
cluster.stop()
def testStorageReconnectDuringStore(self):
cluster = NEOCluster(replicas=1)
try:
cluster.start()
t, c = cluster.getTransaction()
c.root()[0] = 'ok'
while cluster.client.cp.connection_dict:
cluster.client.cp._dropConnections()
t.commit() # store request
finally:
cluster.stop()
def testStorageReconnectDuringTransactionLog(self):
cluster = NEOCluster(storage_count=2, partitions=2)
try:
cluster.start()
t, c = cluster.getTransaction()
while cluster.client.cp.connection_dict:
cluster.client.cp._dropConnections()
tid, (t1,) = cluster.client.transactionLog(
ZERO_TID, c.db().lastTransaction(), 10)
finally:
cluster.stop()
def testStorageReconnectDuringUndoLog(self):
cluster = NEOCluster(storage_count=2, partitions=2)
try:
cluster.start()
t, c = cluster.getTransaction()
while cluster.client.cp.connection_dict:
cluster.client.cp._dropConnections()
t1, = cluster.client.undoLog(0, 10)
finally:
cluster.stop()
def testDropNodeThenRestartCluster(self):
""" Start a cluster with more than one storage, down one, shutdown the
cluster then restart it. The partition table recovered must not include
the dropped node """
def checkNodeState(state):
self.assertEqual(cluster.getNodeState(s1), state)
self.assertEqual(cluster.getNodeState(s2), NodeStates.RUNNING)
# start with two storage / one replica
cluster = NEOCluster(storage_count=2, replicas=1)
s1, s2 = cluster.storage_list
try:
cluster.start()
checkNodeState(NodeStates.RUNNING)
self.assertEqual([], cluster.getOudatedCells())
# drop one
cluster.neoctl.dropNode(s1.uuid)
checkNodeState(None)
self.tic() # Let node state update reach remaining storage
checkNodeState(None)
self.assertEqual([], cluster.getOudatedCells())
# restart with s2 only
finally:
cluster.stop()
cluster.reset()
try:
cluster.start(storage_list=[s2])
checkNodeState(None)
# then restart it, it must be in pending state
s1.start()
self.tic()
checkNodeState(NodeStates.PENDING)
finally:
cluster.stop()
def test2Clusters(self):
cluster1 = NEOCluster()
cluster2 = NEOCluster()
try:
cluster1.start()
cluster2.start()
t1, c1 = cluster1.getTransaction()
t2, c2 = cluster2.getTransaction()
c1.root()['1'] = c2.root()['2'] = ''
t1.commit()
t2.commit()
finally:
cluster1.stop()
cluster2.stop()
def testAbortStorage(self):
cluster = NEOCluster(partitions=2, storage_count=2)
storage = cluster.storage_list[0]
try:
cluster.start()
# prevent storage to reconnect, in order to easily test
# that cluster becomes non-operational
storage.connectToPrimary = sys.exit
# send an unexpected to master so it aborts connection to storage
storage.master_conn.answer(Packets.Pong())
self.tic()
self.assertEqual(cluster.neoctl.getClusterState(),
ClusterStates.VERIFYING)
finally:
cluster.stop()
def testShutdown(self):
cluster = NEOCluster(master_count=3, partitions=10,
replicas=1, storage_count=3)
try:
cluster.start()
# fill DB a little
t, c = cluster.getTransaction()
c.root()[''] = ''
t.commit()
# tell admin to shutdown the cluster
cluster.neoctl.setClusterState(ClusterStates.STOPPING)
# all nodes except clients should exit
cluster.join(cluster.master_list
+ cluster.storage_list
+ cluster.admin_list)
finally:
cluster.stop()
cluster.reset() # reopen DB to check partition tables
dm = cluster.storage_list[0].dm
self.assertEqual(1, dm.getPTID())
pt = list(dm.getPartitionTable())
self.assertEqual(20, len(pt))
for _, _, state in pt:
self.assertEqual(state, CellStates.UP_TO_DATE)
for s in cluster.storage_list[1:]:
self.assertEqual(s.dm.getPTID(), 1)
self.assertEqual(list(s.dm.getPartitionTable()), pt)
def testInternalInvalidation(self):
    """Check that a connection of the same client application sees a
    commit made by another thread, even when the new transaction begins
    while the committing thread is paused just before it processes
    AnswerTransactionFinished."""
    # l1/l2 serialize the committing thread with the main test thread.
    l1 = threading.Lock(); l1.acquire()
    l2 = threading.Lock(); l2.acquire()
    def _handlePacket(orig, conn, packet, kw={}, handler=None):
        if type(packet) is Packets.AnswerTransactionFinished:
            # Pause the committing thread right before the answer is
            # processed, and wake the main thread up.
            l1.release()
            l2.acquire()
        orig(conn, packet, kw, handler)
    cluster = NEOCluster()
    try:
        cluster.start()
        t1, c1 = cluster.getTransaction()
        c1.root()['x'] = x1 = PCounter()
        t1.commit()
        t1.begin()
        x1.value = 1
        t2, c2 = cluster.getTransaction()
        x2 = c2.root()['x']
        p = Patch(cluster.client, _handlePacket=_handlePacket)
        try:
            p.apply()
            t = self.newThread(t1.commit)
            l1.acquire()
            # t1's commit is paused; begin t2 in this window.
            t2.begin()
        finally:
            del p
            l2.release()
        t.join()
        # t2 must see the value committed by t1.
        self.assertEqual(x2.value, 1)
    finally:
        cluster.stop()
def testExternalInvalidation(self):
    """Check cache/connection consistency when invalidations come from
    another client application (external commits), including the race
    where an invalidation arrives while a load from storage is in
    flight."""
    cluster = NEOCluster()
    try:
        cluster.start()
        cache = cluster.client._cache
        # Initialize objects
        t1, c1 = cluster.getTransaction()
        c1.root()['x'] = x1 = PCounter()
        c1.root()['y'] = y = PCounter()
        y.value = 1
        t1.commit()
        # Get pickle of y
        t1.begin()
        x = c1._storage.load(x1._p_oid)[0]
        y = c1._storage.load(y._p_oid)[0]
        # Start the testing transaction
        # (at this time, we still have x=0 and y=1)
        t2, c2 = cluster.getTransaction()
        # Copy y to x using a different Master-Client connection
        client = ClientApplication(name=cluster.name,
            master_nodes=cluster.master_nodes)
        txn = transaction.Transaction()
        client.tpc_begin(txn)
        client.store(x1._p_oid, x1._p_serial, y, '', txn)
        # Delay invalidation for x
        with cluster.master.filterConnection(cluster.client) as m2c:
            m2c.add(lambda conn, packet:
                isinstance(packet, Packets.InvalidateObjects))
            tid = client.tpc_finish(txn, None)
            # Change to x is committed. Testing connection must ask the
            # storage node to return original value of x, even if we
            # haven't processed yet any invalidation for x.
            x2 = c2.root()['x']
            cache.clear() # bypass cache
            self.assertEqual(x2.value, 0)
        x2._p_deactivate()
        t1.begin() # process invalidation and sync connection storage
        self.assertEqual(x2.value, 0)
        # New testing transaction. Now we can see the last value of x.
        t2.begin()
        self.assertEqual(x2.value, 1)
        # Now test cache invalidation during a load from a storage
        l1 = threading.Lock(); l1.acquire()
        l2 = threading.Lock(); l2.acquire()
        def _loadFromStorage(orig, *args):
            try:
                return orig(*args)
            finally:
                # Pause after the storage answered, before the result
                # is put in the cache.
                l1.release()
                l2.acquire()
        x2._p_deactivate()
        # Remove last version of x from cache
        cache._remove(cache._oid_dict[x2._p_oid].pop())
        p = Patch(cluster.client, _loadFromStorage=_loadFromStorage)
        try:
            p.apply()
            t = self.newThread(x2._p_activate)
            l1.acquire()
            # At this point, x could not be found the cache and the result
            # from the storage (value=1 with next_tid=None) is about
            # to be processed.
            # Now modify x to receive an invalidation for it.
            txn = transaction.Transaction()
            client.tpc_begin(txn)
            client.store(x2._p_oid, tid, x, '', txn) # value=0
            tid = client.tpc_finish(txn, None)
            t1.begin() # make sure invalidation is processed
        finally:
            del p
        # Resume processing of answer from storage. An entry should be
        # added in cache for x=1 with a fixed next_tid (i.e. not None)
        l2.release()
        t.join()
        self.assertEqual(x2.value, 1)
        self.assertEqual(x1.value, 0)
        # l1 is acquired and l2 is released
        # Change x again from 0 to 1, while the checking connection c1
        # is suspended at the beginning of the transaction t1,
        # between Storage.sync() and flush of invalidations.
        def _flush_invalidations(orig):
            l1.release()
            l2.acquire()
            orig()
        x1._p_deactivate()
        t1.abort()
        p = Patch(c1, _flush_invalidations=_flush_invalidations)
        try:
            p.apply()
            t = self.newThread(t1.begin)
            l1.acquire()
            txn = transaction.Transaction()
            client.tpc_begin(txn)
            client.store(x2._p_oid, tid, y, '', txn)
            tid = client.tpc_finish(txn, None)
            client.close()
        finally:
            del p
        l2.release()
        t.join()
        # A transaction really begins when it acquires the lock to flush
        # invalidations. The previous lastTransaction() only does a ping
        # to make sure we have a recent enough view of the DB.
        self.assertEqual(x1.value, 1)
    finally:
        cluster.stop()
def testClientReconnection(self):
    """Check that a client transparently reconnects to master and
    storage after its connections were closed."""
    conn = [None]
    def getConnForNode(orig, node):
        # First call pops None (simulating no connection available);
        # the node must still be seen as running.
        self.assertTrue(node.isRunning())
        return conn.pop()
    cluster = NEOCluster()
    try:
        cluster.start()
        t1, c1 = cluster.getTransaction()
        c1.root()['x'] = x1 = PCounter()
        c1.root()['y'] = y = PCounter()
        y.value = 1
        t1.commit()
        x = c1._storage.load(x1._p_oid)[0]
        y = c1._storage.load(y._p_oid)[0]
        # close connections to master & storage
        c, = cluster.master.nm.getClientList()
        c.getConnection().close()
        c, = cluster.storage.nm.getClientList()
        c.getConnection().close()
        self.tic()
        # modify x with another client
        client = ClientApplication(name=cluster.name,
            master_nodes=cluster.master_nodes)
        txn = transaction.Transaction()
        client.tpc_begin(txn)
        client.store(x1._p_oid, x1._p_serial, y, '', txn)
        tid = client.tpc_finish(txn, None)
        client.close()
        self.tic()
        # Check reconnection to storage.
        with Patch(cluster.client.cp, getConnForNode=getConnForNode):
            self.assertFalse(cluster.client.history(x1._p_oid))
            self.assertFalse(conn)
        self.assertTrue(cluster.client.history(x1._p_oid))
        # Check successful reconnection to master.
        t1.begin()
        self.assertEqual(x1._p_changed ,None)
        self.assertEqual(x1.value, 1)
    finally:
        cluster.stop()
def testInvalidTTID(self):
    """tpc_finish with a ttid unknown to the master must raise
    StorageError."""
    cluster = NEOCluster()
    try:
        cluster.start()
        client = cluster.client
        txn = transaction.Transaction()
        client.tpc_begin(txn)
        txn_context = client._txn_container.get(txn)
        # Corrupt the ttid so the master cannot match the transaction.
        txn_context['ttid'] = add64(txn_context['ttid'], 1)
        self.assertRaises(POSException.StorageError,
            client.tpc_finish, txn, None)
    finally:
        cluster.stop()
def testStorageFailureDuringTpcFinish(self):
    """Abort the master-storage connection while the master is sending
    AnswerTransactionFinished to the client, and check how the commit
    outcome is reported."""
    def answerTransactionFinished(conn, packet):
        if isinstance(packet, Packets.AnswerTransactionFinished):
            # Kill the storage-master connection at the worst moment.
            c, = cluster.storage.getConnectionList(cluster.master)
            c.abort()
    cluster = NEOCluster()
    try:
        cluster.start()
        t, c = cluster.getTransaction()
        c.root()['x'] = PCounter()
        with cluster.master.filterConnection(cluster.client) as m2c:
            m2c.add(answerTransactionFinished)
            # XXX: This is an expected failure. A ttid column was added to
            #      'trans' table to permit recovery, by checking that the
            #      transaction was really committed.
            try:
                t.commit()
                raise _UnexpectedSuccess
            except ConnectionClosed:
                pass
        t.begin()
        expectedFailure(self.assertIn)('x', c.root())
    finally:
        cluster.stop()
def testEmptyTransaction(self):
    """A transaction that stores no object must still be assigned a tid
    and appear in the storage iterator with an empty oid list."""
    cluster = NEOCluster()
    try:
        cluster.start()
        txn = transaction.Transaction()
        storage = cluster.getZODBStorage()
        storage.tpc_begin(txn)
        storage.tpc_vote(txn)
        serial = storage.tpc_finish(txn)
        t, = storage.iterator()
        self.assertEqual(t.tid, serial)
        self.assertFalse(t.oid_list)
    finally:
        cluster.stop()
def testRecycledClientUUID(self):
    """Check that a new client can connect to a storage while the
    storage has not yet been notified that the previous client (whose
    UUID may be reused) is gone."""
    def delayNotifyInformation(conn, packet):
        return isinstance(packet, Packets.NotifyNodeInformation)
    def notReady(orig, *args):
        # Once the storage answered "not ready", stop delaying node
        # notifications so the retry can succeed.
        m2s.discard(delayNotifyInformation)
        return orig(*args)
    cluster = NEOCluster()
    try:
        cluster.start()
        cluster.getTransaction()
        with cluster.master.filterConnection(cluster.storage) as m2s:
            m2s.add(delayNotifyInformation)
            cluster.client.master_conn.close()
            client = ClientApplication(name=cluster.name,
                master_nodes=cluster.master_nodes)
            p = Patch(client.storage_bootstrap_handler, notReady=notReady)
            try:
                p.apply()
                x = client.load(ZERO_TID)
            finally:
                del p
                client.close()
            self.assertNotIn(delayNotifyInformation, m2s)
    finally:
        cluster.stop()
def testAutostart(self):
    """With autostart=3, the cluster stays in RECOVERING with only two
    storage nodes and starts once the third one is up."""
    def startCluster():
        getClusterState = cluster.neoctl.getClusterState
        # Still recovering with 2 of 3 expected storage nodes.
        self.assertEqual(ClusterStates.RECOVERING, getClusterState())
        cluster.storage_list[2].start()
    cluster = NEOCluster(storage_count=3, autostart=3)
    try:
        cluster.startCluster = startCluster
        cluster.start(cluster.storage_list[:2])
    finally:
        cluster.stop()
        del cluster.startCluster
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/threaded/testImporter.py 0000664 0000000 0000000 00000015537 12601037530 0027665 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2014-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
from collections import deque
from cPickle import Pickler, Unpickler
from cStringIO import StringIO
from itertools import islice, izip_longest
import os, time, unittest
import neo, transaction, ZODB
from neo.lib import logging
from neo.lib.util import u64
from neo.storage.database.importer import Repickler
from ..fs2zodb import Inode
from .. import getTempDirectory
from . import NEOCluster, NEOThreadedTest
from ZODB.FileStorage import FileStorage
class Equal:
    """Mix-in giving value equality: two objects are equal iff they have
    exactly the same type and the same instance attributes.

    Every instance hashes to the constant 1, so equal-by-value objects
    land in the same dict bucket and are then compared with __eq__.
    """
    _recurse = {}

    def __hash__(self):
        # Constant hash: equality is decided entirely by __eq__.
        return 1

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.__dict__ == other.__dict__

    def __repr__(self):
        attrs = ", ".join(
            "%s=%r" % item for item in self.__dict__.iteritems())
        return "<%s(%s)>" % (self.__class__.__name__, attrs)
class Reduce(Equal, object):
    """Pickle guinea pig recording everything fed to it.

    It remembers constructor arguments, state, appended/extended items
    and key/value assignments, and replays them through __reduce__,
    truncated to the shortest tuple that still carries all recorded
    information.
    """
    state = None  # replaced by __setstate__

    def __init__(self, *args):
        self.args = args
        self._l = []
        self._d = []

    def append(self, item):
        self._l.append(item)

    def extend(self, item):
        self._l.extend(item)

    def __setitem__(self, *args):
        # Record (key, value) pairs in assignment order.
        self._d.append(args)

    def __setstate__(self, state):
        self.state = state

    def __reduce__(self):
        full = (self.__class__, self.args, self.state,
                iter(self._l), iter(self._d))
        # Keep only the leading components that carry information:
        # dict items imply everything; list items imply state; state is
        # included only when set; class+args is the minimum.
        if self._d:
            length = 5
        elif self._l:
            length = 4
        elif self.state is not None:
            length = 3
        else:
            length = 2
        return full[:length]
class Obj(Equal):
    """Old-style pickle guinea pig using the __getinitargs__ and
    __getstate__/__setstate__ protocols."""

    state = None  # replaced by __setstate__

    def __init__(self, *args):
        self.args = args

    def __getinitargs__(self):
        # Unpickling re-invokes __init__ with the original arguments.
        return self.args

    def __getstate__(self):
        return self.state

    def __setstate__(self, state):
        self.state = state
class NewObj(Obj, object):
    # New-style variant of Obj: pickle ignores __getinitargs__ for
    # new-style classes, hence the argument-less __init__.
    def __init__(self):
        pass # __getinitargs__ only work with old-style classes
class DummyRepickler(Repickler):
    # Repickler wrapping no inner pickler, used to run the repickling
    # machinery standalone in tests.
    def __init__(self):
        Repickler.__init__(self, None)

    # Forced to True forever: __setattr__ below silently drops any
    # assignment to it.
    _changed = True

    def __setattr__(self, name, value):
        if name != "_changed":
            self.__dict__[name] = value
class ImporterTests(NEOThreadedTest):
def testRepickler(self):
    """Exercise Repickler with objects covering every pickle reduction
    path: init args, state, appended list items and dict items; the
    repickled stream must load back equal to the original."""
    r2 = Obj("foo")
    r2.__setstate__("bar")
    r2 = Reduce(r2)
    r3 = Reduce(1, 2)
    r3.__setstate__(NewObj())
    r4 = Reduce()
    r4.args = r2.args
    r4.__setstate__("bar")
    r4.extend("!!!")
    r5 = Reduce()
    r5.append("!!!")
    r5["foo"] = "bar"
    state = {r2: r3, r4: r5}
    p = StringIO()
    # Protocol 1, two consecutive pickles in the same stream.
    Pickler(p, 1).dump(Obj).dump(state)
    p = p.getvalue()
    r = DummyRepickler()(p)
    load = Unpickler(StringIO(r)).load
    self.assertIs(Obj, load())
    self.assertDictEqual(state, load())
def test(self):
    """End-to-end test of the Importer storage backend: build several
    FileStorage databases mirroring the NEO source tree, then start a
    NEO cluster that imports them transparently while a client reads
    and writes concurrently."""
    importer = []
    fs_dir = os.path.join(getTempDirectory(), self.id())
    os.mkdir(fs_dir)
    src_root, = neo.__path__
    fs_list = "root", "client", "master", "tests"
    def root_filter(name):
        # Keep non-.pyc entries that do not belong to a sub-database.
        if not name.endswith(".pyc"):
            i = name.find(os.sep)
            return i < 0 or name[:i] not in fs_list
    def sub_filter(name):
        return lambda n: n[-4:] != '.pyc' and \
            n.split(os.sep, 1)[0] in (name, "scripts")
    conn_list = []
    iter_list = []
    # Setup several FileStorage databases.
    for i, name in enumerate(fs_list):
        fs_path = os.path.join(fs_dir, name + ".fs")
        c = ZODB.DB(FileStorage(fs_path)).open()
        r = c.root()["neo"] = Inode()
        transaction.commit()
        conn_list.append(c)
        iter_list.append(r.treeFromFs(src_root, 10,
            sub_filter(name) if i else root_filter))
        # NOTE(review): markup appears stripped from this string by the
        # archive extraction — the original likely wraps the path in
        # <filestorage>...</filestorage> tags; verify against VCS.
        importer.append((name, {
            "storage": "\npath %s\n" % fs_path
            }))
    # Populate FileStorage databases.
    for iter_list in izip_longest(*iter_list):
        for i in iter_list:
            if i:
                transaction.commit()
    del iter_list
    # Get oids of mount points and close.
    for (name, cfg), c in zip(importer, conn_list):
        r = c.root()["neo"]
        if name == "root":
            for name in fs_list[1:]:
                cfg[name] = str(u64(r[name]._p_oid))
        else:
            cfg["oid"] = str(u64(r[name]._p_oid))
        c.db().close()
    #del importer[0][1][importer.pop()[0]]
    # Start NEO cluster with transparent import of a multi-base ZODB.
    cluster = NEOCluster(compress=False, importer=importer)
    try:
        # Suspend import for a while, so that import
        # is finished in the middle of the below 'for' loop.
        # Use a slightly different main loop for storage so that it
        # does not import data too fast and we test read/write access
        # by the client during the import.
        dm = cluster.storage.dm
        def doOperation(app):
            del dm.doOperation
            try:
                while True:
                    if app.task_queue:
                        app.task_queue[-1].next()
                    app._poll()
            except StopIteration:
                app.task_queue.pop()
        dm.doOperation = doOperation
        cluster.start()
        t, c = cluster.getTransaction()
        r = c.root()["neo"]
        # History is not implemented by the Importer backend.
        self.assertRaisesRegexp(NotImplementedError, " getObjectHistory$",
            c.db().history, r._p_oid)
        i = r.walk()
        next(islice(i, 9, None))
        dm.doOperation(cluster.storage) # resume
        deque(i, maxlen=0)  # exhaust the walk iterator
        last_import = None
        for i, r in enumerate(r.treeFromFs(src_root, 10)):
            t.commit()
            if cluster.storage.dm._import:
                last_import = i
            self.tic()
        # Import must have been ongoing during the loop and finished.
        self.assertTrue(last_import and not cluster.storage.dm._import)
        i = len(src_root) + 1
        # The imported tree must match the filesystem exactly.
        self.assertEqual(sorted(r.walk()), sorted(
            (x[i:] or '.', sorted(y), sorted(z))
            for x, y, z in os.walk(src_root)))
        t.commit()
    finally:
        cluster.stop()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/threaded/testReplication.py 0000664 0000000 0000000 00000041621 12601037530 0030326 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2012-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import random
import time
import unittest
from collections import defaultdict
from functools import wraps
from neo.lib import logging
from neo.storage.checker import CHECK_COUNT
from neo.lib.connector import SocketConnector
from neo.lib.connection import ClientConnection
from neo.lib.event import EventManager
from neo.lib.protocol import CellStates, ClusterStates, Packets, \
ZERO_OID, ZERO_TID, MAX_TID, uuid_str
from neo.lib.util import p64
from .. import Patch
from . import ConnectionFilter, NEOCluster, NEOThreadedTest, predictable_random
def backup_test(partitions=1, upstream_kw=None, backup_kw=None):
    """Decorator for tests needing an upstream cluster and a backup
    cluster already switched to backup mode.

    The wrapped test method is called with the backup cluster as its
    only extra argument; the upstream cluster is reachable through its
    'upstream' attribute. Both clusters are stopped afterwards, even on
    failure.

    :param partitions: number of partitions for both clusters
    :param upstream_kw: extra NEOCluster kwargs for the upstream cluster
    :param backup_kw: extra NEOCluster kwargs for the backup cluster
    """
    # None sentinels instead of mutable {} defaults, so a shared dict is
    # never accidentally mutated across calls.
    upstream_kw = upstream_kw or {}
    backup_kw = backup_kw or {}
    def decorator(wrapped):
        def wrapper(self):
            upstream = NEOCluster(partitions, **upstream_kw)
            try:
                upstream.start()
                backup = NEOCluster(partitions, upstream=upstream,
                                    **backup_kw)
                try:
                    backup.start()
                    backup.neoctl.setClusterState(
                        ClusterStates.STARTING_BACKUP)
                    self.tic()
                    wrapped(self, backup)
                finally:
                    backup.stop()
            finally:
                upstream.stop()
        return wraps(wrapped)(wrapper)
    return decorator
class ReplicationTests(NEOThreadedTest):
def checksumPartition(self, storage, partition, max_tid=MAX_TID):
    """Return (TID-range checksum, serial-range checksum) for one
    partition of a storage node, covering tids up to max_tid."""
    dm = storage.dm
    args = partition, None, ZERO_TID, max_tid
    return dm.checkTIDRange(*args), \
        dm.checkSerialRange(min_oid=ZERO_OID, *args)
def checkPartitionReplicated(self, source, destination, partition, **kw):
    # Two nodes hold identical data for a partition iff their range
    # checksums match.
    self.assertEqual(self.checksumPartition(source, partition, **kw),
        self.checksumPartition(destination, partition, **kw))
def checkBackup(self, cluster, **kw):
    """Check that every partition assigned to every backup storage node
    matches a readable upstream cell, and that partition tables agree.

    Returns the number of (storage, partition) pairs checked.
    """
    upstream_pt = cluster.upstream.primary_master.pt
    pt = cluster.primary_master.pt
    np = pt.getPartitions()
    self.assertEqual(np, upstream_pt.getPartitions())
    checked = 0
    source_dict = {x.uuid: x for x in cluster.upstream.storage_list}
    for storage in cluster.storage_list:
        # No transaction should be left half-committed.
        self.assertFalse(storage.dm._uncommitted_data)
        self.assertEqual(np, storage.pt.getPartitions())
        for partition in pt.getAssignedPartitionList(storage.uuid):
            cell_list = upstream_pt.getCellList(partition, readable=True)
            # Compare against any readable upstream replica.
            source = source_dict[random.choice(cell_list).getUUID()]
            self.checkPartitionReplicated(source, storage, partition, **kw)
            checked += 1
    return checked
def testBackupNormalCase(self):
    """Nominal backup scenarios: initial catch-up, close following of
    upstream, restart of the backup cluster, and clean STOPPING_BACKUP
    while replication packets are delayed at two different points."""
    np = 7
    nr = 2
    check_dict = dict.fromkeys(xrange(np))
    upstream = NEOCluster(partitions=np, replicas=nr-1, storage_count=3)
    try:
        upstream.start()
        importZODB = upstream.importZODB()
        importZODB(3)
        backup = NEOCluster(partitions=np, replicas=nr-1, storage_count=5,
            upstream=upstream)
        try:
            backup.start()
            # Initialize & catch up.
            backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
            self.tic()
            self.assertEqual(np*nr, self.checkBackup(backup))
            # Normal case, following upstream cluster closely.
            importZODB(17)
            self.tic()
            self.assertEqual(np*nr, self.checkBackup(backup))
            # Check that a backup cluster can be restarted.
        finally:
            backup.stop()
        backup.reset()
        try:
            backup.start()
            # It must come back in BACKINGUP state by itself.
            self.assertEqual(backup.neoctl.getClusterState(),
                ClusterStates.BACKINGUP)
            importZODB(17)
            self.tic()
            self.assertEqual(np*nr, self.checkBackup(backup))
            backup.neoctl.checkReplicas(check_dict, ZERO_TID, None)
            self.tic()
            # Stop backing up, nothing truncated.
            backup.neoctl.setClusterState(ClusterStates.STOPPING_BACKUP)
            self.tic()
            self.assertEqual(np*nr, self.checkBackup(backup))
            self.assertEqual(backup.neoctl.getClusterState(),
                ClusterStates.RUNNING)
        finally:
            backup.stop()
        def delaySecondary(conn, packet):
            if isinstance(packet, Packets.Replicate):
                tid, upstream_name, source_dict = packet.decode()
                # Delay only secondary (intra-backup) replication.
                return not upstream_name and all(source_dict.itervalues())
        backup.reset()
        try:
            backup.start()
            backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
            self.tic()
            with backup.master.filterConnection(*backup.storage_list) as f:
                f.add(delaySecondary)
                while not f.filtered_count:
                    importZODB(1)
                self.tic()
                backup.neoctl.setClusterState(ClusterStates.STOPPING_BACKUP)
                self.tic()
            self.tic()
            self.assertEqual(np*nr, self.checkBackup(backup,
                max_tid=backup.master.getLastTransaction()))
        finally:
            backup.stop()
        backup.reset()
        try:
            backup.start()
            backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
            self.tic()
            with ConnectionFilter() as f:
                # Delay AddObject packets on unidentified connections.
                f.add(lambda conn, packet: conn.getUUID() is None and
                    isinstance(packet, Packets.AddObject))
                while not f.filtered_count:
                    importZODB(1)
                self.tic()
                backup.neoctl.setClusterState(ClusterStates.STOPPING_BACKUP)
                self.tic()
            self.tic()
            self.assertEqual(np*nr, self.checkBackup(backup,
                max_tid=backup.master.getLastTransaction()))
        finally:
            backup.stop()
    finally:
        upstream.stop()
@predictable_random()
def testBackupNodeLost(self):
    """Check backup cluster can recover after random connection loss
    - backup master disconnected from upstream master
    - primary storage disconnected from backup master
    - non-primary storage disconnected from backup master
    """
    np = 4
    check_dict = dict.fromkeys(xrange(np))
    from neo.master.backup_app import random
    def fetchObjects(orig, min_tid=None, min_oid=ZERO_OID):
        if min_tid is None:
            counts[0] += 1
            if counts[0] > 1:
                # Simulate a cut: drop the connection to the backup
                # master on the second replication.
                orig.im_self.app.master_conn.close()
        return orig(min_tid, min_oid)
    def onTransactionCommitted(orig, txn):
        counts[0] += 1
        if counts[0] > 1:
            # Close the connection of another identified client node.
            node_list = orig.im_self.nm.getClientList(only_identified=True)
            node_list.remove(txn.getNode())
            node_list[0].getConnection().close()
        return orig(txn)
    upstream = NEOCluster(partitions=np, replicas=0, storage_count=1)
    try:
        upstream.start()
        importZODB = upstream.importZODB(random=random)
        # Do not start with an empty DB so that 'primary_dict' below is not
        # empty on the first iteration.
        importZODB(1)
        backup = NEOCluster(partitions=np, replicas=2, storage_count=4,
            upstream=upstream)
        try:
            backup.start()
            backup.neoctl.setClusterState(ClusterStates.STARTING_BACKUP)
            self.tic()
            storage_list = [x.uuid for x in backup.storage_list]
            slave = set(xrange(len(storage_list))).difference
            for event in xrange(10):
                counts = [0]
                if event == 5:
                    p = Patch(upstream.master.tm,
                        _on_commit=onTransactionCommitted)
                else:
                    primary_dict = defaultdict(list)
                    for k, v in sorted(backup.master.backup_app
                            .primary_partition_dict.iteritems()):
                        primary_dict[storage_list.index(v._uuid)].append(k)
                    if event % 2:
                        storage = slave(primary_dict).pop()
                    else:
                        storage, partition_list = primary_dict.popitem()
                    # Populate until the found storage performs
                    # a second replication partially and aborts.
                    p = Patch(backup.storage_list[storage].replicator,
                        fetchObjects=fetchObjects)
                with p:
                    importZODB(lambda x: counts[0] > 1)
                if event > 5:
                    backup.neoctl.checkReplicas(check_dict, ZERO_TID, None)
                self.tic()
            # 3 replicas per partition must all be consistent.
            self.assertEqual(np*3, self.checkBackup(backup))
        finally:
            backup.stop()
    finally:
        upstream.stop()
@backup_test()
def testBackupUpstreamMasterDead(self, backup):
    """Check proper behaviour when upstream master is unreachable

    More generally, this checks that when a handler raises when a connection
    is closed voluntarily, the connection is in a consistent state and can
    be, for example, closed again after the exception is catched, without
    assertion failure.
    """
    conn, = backup.master.getConnectionList(backup.upstream.master)
    # trigger ping
    self.assertFalse(conn.isPending())
    conn.onTimeout()
    self.assertTrue(conn.isPending())
    # force ping to have expired
    # connection will be closed before upstream master has time
    # to answer
    def _poll(orig, self, blocking):
        if backup.master.em is self:
            # Fire the timeout once, from inside the backup master's
            # own event loop, then restore the original _poll.
            p.revert()
            conn.onTimeout()
        else:
            orig(self, blocking)
    with Patch(EventManager, _poll=_poll) as p:
        self.tic()
    # The backup master must have reconnected on a fresh connection.
    new_conn, = backup.master.getConnectionList(backup.upstream.master)
    self.assertIsNot(new_conn, conn)
@backup_test()
def testBackupUpstreamStorageDead(self, backup):
    """Check that a backup storage node retries connecting to a dead
    upstream storage node with the expected deferred-retry pattern."""
    upstream = backup.upstream
    with ConnectionFilter() as f:
        f.add(lambda conn, packet:
            isinstance(packet, Packets.InvalidateObjects))
        upstream.importZODB()(1)
    count = [0]
    def _connect(orig, conn):
        # Count every outgoing connection attempt.
        count[0] += 1
        orig(conn)
    with Patch(ClientConnection, _connect=_connect):
        upstream.storage.listening_conn.close()
        self.tic(step=2)
        self.assertEqual(count[0], 0)
        # Shrink the retry delay so the test stays fast.
        t = SocketConnector.CONNECT_LIMIT = .5
        t += time.time()
        self.tic()
        # 1st attempt failed, 2nd is deferred
        self.assertEqual(count[0], 2)
        self.tic(check_timeout=(backup.storage,))
        # 2nd failed, 3rd deferred
        self.assertEqual(count[0], 4)
        # The deferred retry must have respected the delay.
        self.assertTrue(t <= time.time())
@backup_test()
def testBackupDelayedUnlockTransaction(self, backup):
    """
    Check that a backup storage node is put on hold by upstream if
    the requested transaction is still locked. Such case happens when
    the backup cluster reacts very quickly to a new transaction.
    """
    upstream = backup.upstream
    with upstream.master.filterConnection(upstream.storage) as f:
        # Keep the transaction locked on the upstream storage.
        f.add(lambda conn, packet:
            isinstance(packet, Packets.NotifyUnlockInformation))
        upstream.importZODB()(1)
        self.tic()
    self.tic()
    self.assertEqual(1, self.checkBackup(backup))
def testReplicationAbortedBySource(self):
    """
    Check that a feeding node aborts replication when its partition is
    dropped, and that the out-of-date node finishes to replicate from
    another source.
    Here are the different states of partitions over time:
      pt: 0: U|U|U
      pt: 0: UO.|U.O|FOO
      pt: 0: UU.|U.O|FOO
      pt: 0: UU.|U.U|FOO # nodes 1 & 2 replicate from node 0
      pt: 0: UU.|U.U|.OU # here node 0 lost partition 2
                         # and node 1 must switch to node 2
      pt: 0: UU.|U.U|.UU
    """
    def delayAskFetch(conn, packet):
        # Delay only the fetch for the feeding partition, from s1 to s0.
        return isinstance(packet, delayed) and \
            packet.decode()[0] == offset and \
            conn in s1.getConnectionList(s0)
    def changePartitionTable(orig, ptid, cell_list):
        if (offset, s0.uuid, CellStates.DISCARDED) in cell_list:
            connection_filter.remove(delayAskFetch)
            # XXX: this is currently not done by
            #      default for performance reason
            orig.im_self.dropPartitions((offset,))
        return orig(ptid, cell_list)
    np = 3
    cluster = NEOCluster(partitions=np, replicas=1, storage_count=3)
    s0, s1, s2 = cluster.storage_list
    # Run the scenario once per delayed packet type.
    for delayed in Packets.AskFetchTransactions, Packets.AskFetchObjects:
        try:
            cluster.start([s0])
            cluster.populate([range(np*2)] * np)
            s1.start()
            s2.start()
            self.tic()
            cluster.neoctl.enableStorageList([s1.uuid, s2.uuid])
            cluster.neoctl.tweakPartitionTable()
            # Find the single feeding cell.
            offset, = [offset for offset, row in enumerate(
                cluster.master.pt.partition_list)
                for cell in row if cell.isFeeding()]
            with ConnectionFilter() as connection_filter:
                connection_filter.add(delayAskFetch,
                    Patch(s0.dm, changePartitionTable=changePartitionTable))
                self.tic()
                self.assertEqual(1, connection_filter.filtered_count)
            self.tic()
            # s1 must have finished replicating from the new source s2.
            self.checkPartitionReplicated(s1, s2, offset)
        finally:
            cluster.stop()
        cluster.reset(True)
def testCheckReplicas(self):
    """Corrupt replicas and check that neoctl.checkReplicas marks the
    right cells CORRUPTED, only when the check covers the corrupted tid
    range, and that the cluster goes to VERIFYING when too many cells
    of a partition are corrupted."""
    from neo.storage import checker
    def corrupt(offset):
        # Delete one object from the second readable replica of the
        # partition; return the uuid of the first one (still intact).
        s0, s1, s2 = (storage_dict[cell.getUUID()]
            for cell in cluster.master.pt.getCellList(offset, True))
        logging.info('corrupt partition %u of %s',
            offset, uuid_str(s1.uuid))
        s1.dm.deleteObject(p64(np+offset), p64(corrupt_tid))
        return s0.uuid
    def check(expected_state, expected_count):
        # Count cells currently marked CORRUPTED in the partition table.
        self.assertEqual(expected_count, len([None
            for row in cluster.neoctl.getPartitionRowList()[1]
            for cell in row[1]
            if cell[1] == CellStates.CORRUPTED]))
        self.assertEqual(expected_state, cluster.neoctl.getClusterState())
    np = 5
    tid_count = np * 3
    corrupt_tid = tid_count // 2
    check_dict = dict.fromkeys(xrange(np))
    cluster = NEOCluster(partitions=np, replicas=2, storage_count=3)
    try:
        # Small chunk size so checks span several rounds.
        checker.CHECK_COUNT = 2
        cluster.start()
        cluster.populate([range(np*2)] * tid_count)
        storage_dict = {x.uuid: x for x in cluster.storage_list}
        cluster.neoctl.checkReplicas(check_dict, ZERO_TID, None)
        self.tic()
        check(ClusterStates.RUNNING, 0)
        source = corrupt(0)
        # Check above the corrupted tid: nothing detected.
        cluster.neoctl.checkReplicas(check_dict, p64(corrupt_tid+1), None)
        self.tic()
        check(ClusterStates.RUNNING, 0)
        # Check partition 0 against the intact source: detected.
        cluster.neoctl.checkReplicas({0: source}, ZERO_TID, None)
        self.tic()
        check(ClusterStates.RUNNING, 1)
        corrupt(1)
        cluster.neoctl.checkReplicas(check_dict, p64(corrupt_tid+1), None)
        self.tic()
        check(ClusterStates.RUNNING, 1)
        # Full check from the beginning: more corruption detected and
        # the cluster loses enough readable cells to leave RUNNING.
        cluster.neoctl.checkReplicas(check_dict, ZERO_TID, None)
        self.tic()
        check(ClusterStates.VERIFYING, 4)
    finally:
        checker.CHECK_COUNT = CHECK_COUNT
        cluster.stop()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/ 0000775 0000000 0000000 00000000000 12601037530 0023755 5 ustar 00root root 0000000 0000000 neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/__init__.py 0000664 0000000 0000000 00000003701 12601037530 0026067 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import os
from .. import DB_PREFIX
# NEO_TEST_ZODB_FUNCTIONAL selects whether the ZODB test suites run
# against a functional (subprocess-based) or a threaded NEO cluster.
functional = int(os.getenv('NEO_TEST_ZODB_FUNCTIONAL', 0))
if functional:
    from ..functional import NEOCluster, NEOFunctionalTest as TestCase
else:
    from ..threaded import NEOCluster, NEOThreadedTest as TestCase
class ZODBTestCase(TestCase):
    """Base class plugging ZODB's standard storage test suites into a
    NEO cluster configured from NEO_TEST_ZODB_* environment variables."""

    def setUp(self, cluster_kw=None):
        """Start a NEO cluster and open a ZODB storage on it.

        :param cluster_kw: optional dict of NEOCluster keyword overrides
            (None — the default — means no overrides; a None default is
            used instead of a mutable {} default)
        """
        super(ZODBTestCase, self).setUp()
        storages = int(os.getenv('NEO_TEST_ZODB_STORAGES', 1))
        kw = {
            'master_count': int(os.getenv('NEO_TEST_ZODB_MASTERS', 1)),
            'replicas': int(os.getenv('NEO_TEST_ZODB_REPLICAS', 0)),
            'partitions': int(os.getenv('NEO_TEST_ZODB_PARTITIONS', 1)),
            'db_list': ['%s%u' % (DB_PREFIX, i) for i in xrange(storages)],
        }
        if cluster_kw:
            kw.update(cluster_kw)
        if functional:
            kw['temp_dir'] = self.getTempDirectory()
        self.neo = NEOCluster(**kw)
        self.neo.start()
        self.open()

    def _tearDown(self, success):
        # Close the storage and the cluster, then drop references so the
        # test case object does not keep them alive.
        self._storage.cleanup()
        self.neo.stop()
        del self.neo, self._storage
        super(ZODBTestCase, self)._tearDown(success)

    # ZODB's test suites still use these pre-2.7 unittest aliases.
    assertEquals = failUnlessEqual = TestCase.assertEqual
    assertNotEquals = failIfEqual = TestCase.assertNotEqual

    def open(self, **kw):
        # (Re)open a ZODB storage on the running cluster.
        self._storage = self.neo.getZODBStorage(**kw)
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testBasic.py 0000664 0000000 0000000 00000001771 12601037530 0026256 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from ZODB.tests.BasicStorage import BasicStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class BasicTests(ZODBTestCase, StorageTestBase, BasicStorage):
    """Run ZODB's BasicStorage conformance suite against NEO."""
    pass
if __name__ == "__main__":
    # ZODB conformance tests are named 'check*', not 'test*'.
    suite = unittest.makeSuite(BasicTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testConflict.py 0000664 0000000 0000000 00000002035 12601037530 0026770 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from ZODB.tests.ConflictResolution import ConflictResolvingStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class ConflictTests(ZODBTestCase, StorageTestBase, ConflictResolvingStorage):
    """Run ZODB's conflict-resolution conformance suite against NEO."""
    pass
if __name__ == "__main__":
    # ZODB conformance tests are named 'check*', not 'test*'.
    suite = unittest.makeSuite(ConflictTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testHistory.py 0000664 0000000 0000000 00000002003 12601037530 0026663 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from ZODB.tests.HistoryStorage import HistoryStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class HistoryTests(ZODBTestCase, StorageTestBase, HistoryStorage):
    """Run ZODB's history conformance suite against NEO."""
    pass
if __name__ == "__main__":
    # ZODB conformance tests are named 'check*', not 'test*'.
    suite = unittest.makeSuite(HistoryTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testIterator.py 0000664 0000000 0000000 00000002150 12601037530 0027016 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
import unittest
from ZODB.tests.IteratorStorage import IteratorStorage
from ZODB.tests.IteratorStorage import ExtendedIteratorStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class IteratorTests(ZODBTestCase, StorageTestBase, IteratorStorage,
        ExtendedIteratorStorage):
    """Run ZODB's iterator test suites against a NEO-backed storage.

    All 'check*' test methods come from IteratorStorage and
    ExtendedIteratorStorage; ZODBTestCase supplies the NEO fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(IteratorTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testMT.py 0000664 0000000 0000000 00000001752 12601037530 0025554 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.MTStorage import MTStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class MTTests(ZODBTestCase, StorageTestBase, MTStorage):
    """Run ZODB's multi-threaded storage test suite against NEO.

    All 'check*' test methods are inherited from MTStorage;
    ZODBTestCase supplies the NEO cluster/storage fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(MTTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testPack.py 0000664 0000000 0000000 00000002617 12601037530 0026113 0 ustar 00root root 0000000 0000000
#
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.PackableStorage import \
PackableStorageWithOptionalGC, PackableUndoStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from .. import expectedFailure
from . import ZODBTestCase
class PackableTests(ZODBTestCase, StorageTestBase,
        PackableStorageWithOptionalGC, PackableUndoStorage):
    """Run ZODB's pack/GC test suites against a NEO cluster.

    The cluster is explicitly configured with the MySQL storage adapter
    (presumably pack behaviour is adapter-dependent -- TODO confirm).
    """

    def setUp(self):
        # Override the default cluster configuration: force the MySQL
        # storage backend for this test class only.
        super(PackableTests, self).setUp(cluster_kw={'adapter': 'MySQL'})

    # Known failures on NEO: keep running these upstream tests but mark them
    # as expected failures so a future fix is noticed.
    checkPackAllRevisions = expectedFailure()(
        PackableStorageWithOptionalGC.checkPackAllRevisions)
    checkPackUndoLog = expectedFailure()(PackableUndoStorage.checkPackUndoLog)
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(PackableTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testPersistent.py 0000664 0000000 0000000 00000002022 12601037530 0027363 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.PersistentStorage import PersistentStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class PersistentTests(ZODBTestCase, StorageTestBase, PersistentStorage):
    """Run ZODB's persistent-object test suite against a NEO storage.

    All 'check*' test methods are inherited from PersistentStorage;
    ZODBTestCase supplies the NEO cluster/storage fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(PersistentTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testReadOnly.py 0000664 0000000 0000000 00000002010 12601037530 0026735 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.ReadOnlyStorage import ReadOnlyStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class ReadOnlyTests(ZODBTestCase, StorageTestBase, ReadOnlyStorage):
    """Run ZODB's read-only storage test suite against a NEO storage.

    All 'check*' test methods are inherited from ReadOnlyStorage;
    ZODBTestCase supplies the NEO cluster/storage fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(ReadOnlyTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testRecovery.py 0000664 0000000 0000000 00000003352 12601037530 0027030 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import ZODB
from ZODB.tests.RecoveryStorage import RecoveryStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from ..functional import NEOCluster
from . import ZODBTestCase
class RecoveryTests(ZODBTestCase, StorageTestBase, RecoveryStorage):
    """Run ZODB's storage-recovery tests between two NEO clusters.

    ZODBTestCase provides the *source* cluster and storage (``self.neo`` /
    ``self._storage``); this class additionally spins up a dedicated
    *destination* cluster (``self.neo_dst``) that RecoveryStorage copies
    transactions into (as ``self._dst`` / ``self._dst_db``).
    """

    def setUp(self):
        super(RecoveryTests, self).setUp()
        # Give the destination cluster its own temporary directory,
        # derived from the source one.
        dst_temp_dir = self.getTempDirectory() + '-dst'
        if not os.path.exists(dst_temp_dir):
            os.makedirs(dst_temp_dir)
        self.neo_dst = NEOCluster(['test_neo1-dst'], partitions=1, replicas=0,
            master_count=1, temp_dir=dst_temp_dir)
        # Ensure a clean slate: stop any leftover processes, re-create the
        # database, then start the destination cluster.
        self.neo_dst.stop()
        self.neo_dst.setupDB()
        self.neo_dst.start()
        # BUG FIX: the destination storage must be opened on the destination
        # cluster, not on the source one (the original code used
        # self.neo.getZODBStorage(), which made RecoveryStorage copy a
        # cluster onto itself and left neo_dst unused except for teardown).
        self._dst = self.neo_dst.getZODBStorage()
        self._dst_db = ZODB.DB(self._dst)

    def _tearDown(self, success):
        super(RecoveryTests, self)._tearDown(success)
        # Close and clean up the destination side symmetrically with setUp.
        self._dst_db.close()
        self._dst.cleanup()
        self.neo_dst.stop()
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(RecoveryTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testRevision.py 0000664 0000000 0000000 00000002010 12601037530 0027016 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.RevisionStorage import RevisionStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class RevisionTests(ZODBTestCase, StorageTestBase, RevisionStorage):
    """Run ZODB's object-revision test suite against a NEO storage.

    All 'check*' test methods are inherited from RevisionStorage;
    ZODBTestCase supplies the NEO cluster/storage fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(RevisionTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testSynchronization.py 0000664 0000000 0000000 00000002036 12601037530 0030431 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.StorageTestBase import StorageTestBase
from ZODB.tests.Synchronization import SynchronizedStorage
from . import ZODBTestCase
class SynchronizationTests(ZODBTestCase, StorageTestBase, SynchronizedStorage):
    """Run ZODB's storage-synchronization test suite against NEO.

    All 'check*' test methods are inherited from SynchronizedStorage;
    ZODBTestCase supplies the NEO cluster/storage fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(SynchronizationTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testUndo.py 0000664 0000000 0000000 00000003422 12601037530 0026135 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.StorageTestBase import StorageTestBase
from ZODB.tests.TransactionalUndoStorage import TransactionalUndoStorage
from ZODB.tests.ConflictResolution import ConflictResolvingTransUndoStorage
from .. import expectedFailure
from . import ZODBTestCase
class UndoTests(ZODBTestCase, StorageTestBase, TransactionalUndoStorage,
        ConflictResolvingTransUndoStorage):
    """Run ZODB's transactional-undo test suites against a NEO storage."""

    # Known failure on NEO: keep running the upstream test but mark it as an
    # expected failure so a future fix is noticed.
    checkTransactionalUndoAfterPack = expectedFailure()(
        TransactionalUndoStorage.checkTransactionalUndoAfterPack)

# NOTE: this mutates the *upstream* TransactionalUndoStorage class at import
# time, removing the test for every user of that class in this process.
# Don't run this test. It cannot run with pipelined store, and is not executed
# on Zeo - but because Zeo doesn't have an iterator, while Neo has.
# Note that it is possible to run this test on Neo with a simple fix:
# instead of expecting "store" to return object's serial, it should
# just load it after commit, and keep its serial.
# When iterator is fully implemented in Neo, a fork of that test should be
# done with above fix.
del TransactionalUndoStorage.checkTransactionalUndoIterator
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(UndoTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testVersion.py 0000664 0000000 0000000 00000002216 12601037530 0026655 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.VersionStorage import VersionStorage
from ZODB.tests.TransactionalUndoVersionStorage import \
TransactionalUndoVersionStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class VersionTests(ZODBTestCase, StorageTestBase, VersionStorage,
        TransactionalUndoVersionStorage):
    """Run ZODB's version-storage test suites against a NEO storage.

    All 'check*' test methods come from VersionStorage and
    TransactionalUndoVersionStorage; ZODBTestCase supplies the NEO fixture.
    """
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(VersionTests, 'check')
    unittest.main(defaultTest='suite')
neoppod-7d5b155980afbc07eed092acc92f4d841ca7265b-neo/neo/tests/zodb/testZODB.py 0000664 0000000 0000000 00000003471 12601037530 0025772 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2009-2015 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests import testZODB
import ZODB
from . import ZODBTestCase
class NEOZODBTests(ZODBTestCase, testZODB.ZODBTests):
    """Run ZODB's high-level DB tests against a NEO storage."""

    def setUp(self):
        super(NEOZODBTests, self).setUp()
        # The upstream tests drive a full ZODB.DB, so wrap the NEO storage
        # (self._storage, provided by ZODBTestCase) in a DB object.
        self._db = ZODB.DB(self._storage)

    def _tearDown(self, success):
        # Close the DB before the base class tears down the cluster/storage.
        self._db.close()
        super(NEOZODBTests, self)._tearDown(success)

    def checkMultipleUndoInOneTransaction(self):
        # XXX: Upstream test accesses a persistent object outside a transaction
        # (it should call transaction.begin() after the last commit)
        # so disable our Connection.afterCompletion optimization.
        # This should really be discussed on zodb-dev ML.
        from ZODB.Connection import Connection
        # Read from the class __dict__ to get the plain function objects
        # (not bound/descriptor-wrapped attributes) for swapping.
        afterCompletion = Connection.__dict__['afterCompletion']
        try:
            Connection.afterCompletion = Connection.__dict__['newTransaction']
            super(NEOZODBTests, self).checkMultipleUndoInOneTransaction()
        finally:
            # Always restore the original method, even if the test fails,
            # so other tests in the process are unaffected.
            Connection.afterCompletion = afterCompletion
if __name__ == "__main__":
    # Collect all 'check*' methods (ZODB's historical test-name prefix) into
    # a module-level 'suite'; unittest.main resolves 'suite' by name.
    suite = unittest.makeSuite(NEOZODBTests, 'check')
    unittest.main(defaultTest='suite')
|