Commit 143f147b authored by root

Use tagged version of src/ZODB

parent 13d745d5
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZODB transfer activity monitoring

$Id: ActivityMonitor.py,v 1.5 2003/12/30 20:17:41 jeremy Exp $"""

__version__='$Revision: 1.5 $'[11:-2]

import time


class ActivityMonitor:
    """ZODB load/store activity monitor

    This simple implementation just keeps a small log in memory
    and iterates over the log when getActivityAnalysis() is called.

    It assumes that log entries are added in chronological sequence,
    which is only guaranteed because DB.py holds a lock when calling
    the closedConnection() method.
    """

    def __init__(self, history_length=3600):
        self.history_length = history_length  # Number of seconds
        self.log = []  # [(time, loads, stores)]

    def closedConnection(self, conn):
        log = self.log
        now = time.time()
        loads, stores = conn.getTransferCounts(1)
        log.append((now, loads, stores))
        self.trim(now)

    def trim(self, now):
        log = self.log
        cutoff = now - self.history_length
        n = 0
        loglen = len(log)
        while n < loglen and log[n][0] < cutoff:
            n = n + 1
        if n:
            del log[:n]

    def setHistoryLength(self, history_length):
        self.history_length = history_length
        self.trim(time.time())

    def getHistoryLength(self):
        return self.history_length

    def getActivityAnalysis(self, start=0, end=0, divisions=10):
        res = []
        now = time.time()
        if start == 0:
            start = now - self.history_length
        if end == 0:
            end = now
        for n in range(divisions):
            res.append({
                'start': start + (end - start) * n / divisions,
                'end': start + (end - start) * (n + 1) / divisions,
                'loads': 0,
                'stores': 0,
                'connections': 0,
                })
        div = res[0]
        div_end = div['end']
        div_index = 0
        connections = 0
        total_loads = 0
        total_stores = 0
        for t, loads, stores in self.log:
            if t < start:
                # We could use a binary search to find the start.
                continue
            elif t > end:
                # We could use a binary search to find the end also.
                break
            while t > div_end:
                div['loads'] = total_loads
                div['stores'] = total_stores
                div['connections'] = connections
                total_loads = 0
                total_stores = 0
                connections = 0
                div_index = div_index + 1
                if div_index < divisions:
                    div = res[div_index]
                    div_end = div['end']
            connections = connections + 1
            total_loads = total_loads + loads
            total_stores = total_stores + stores
        div['stores'] = div['stores'] + total_stores
        div['loads'] = div['loads'] + total_loads
        div['connections'] = div['connections'] + connections
        return res
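
For orientation, a hedged usage sketch (not part of this file): DB.setActivityMonitor() attaches a monitor, and the database then calls closedConnection() on it as each connection closes. `db` is assumed to be an existing ZODB.DB instance.

from ZODB.ActivityMonitor import ActivityMonitor

db.setActivityMonitor(ActivityMonitor(history_length=3600))
# ... open, use, and close some connections ...
for div in db.getActivityMonitor().getActivityAnalysis(divisions=6):
    print "%(start)s..%(end)s loads=%(loads)d stores=%(stores)d" % div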
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import sys
import logging
from cStringIO import StringIO
from cPickle import Unpickler, Pickler
from pickle import PicklingError
from ZODB.POSException import ConflictError
from ZODB.loglevels import BLATHER
logger = logging.getLogger('ZODB.ConflictResolution')
ResolvedSerial = 'rs'
class BadClassName(Exception):
    pass

_class_cache = {}
_class_cache_get = _class_cache.get

def find_global(*args):
    cls = _class_cache_get(args, 0)
    if cls == 0:
        # Not cached. Try to import.
        try:
            module = __import__(args[0], {}, {}, ['cluck'])
        except ImportError:
            cls = 1
        else:
            cls = getattr(module, args[1], 1)
        _class_cache[args] = cls

        if cls == 1:
            logger.log(BLATHER, "Unable to load class", exc_info=True)

    if cls == 1:
        # Not importable
        raise BadClassName(*args)
    return cls

def state(self, oid, serial, prfactory, p=''):
    p = p or self.loadSerial(oid, serial)
    file = StringIO(p)
    unpickler = Unpickler(file)
    unpickler.find_global = find_global
    unpickler.persistent_load = prfactory.persistent_load
    unpickler.load()  # skip the class tuple
    return unpickler.load()

class PersistentReference:

    def __repr__(self):
        return "PR(%s %s)" % (id(self), self.data)

    def __getstate__(self):
        raise PicklingError, "Can't pickle PersistentReference"

class PersistentReferenceFactory:

    data = None

    def persistent_load(self, oid):
        if self.data is None:
            self.data = {}

        r = self.data.get(oid, None)
        if r is None:
            r = PersistentReference()
            r.data = oid
            self.data[oid] = r

        return r

def persistent_id(object):
    if getattr(object, '__class__', 0) is not PersistentReference:
        return None
    return object.data

_unresolvable = {}

def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
                         committedData=''):
    # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
    try:
        prfactory = PersistentReferenceFactory()
        file = StringIO(newpickle)
        unpickler = Unpickler(file)
        unpickler.find_global = find_global
        unpickler.persistent_load = prfactory.persistent_load
        meta = unpickler.load()
        if isinstance(meta, tuple):
            klass = meta[0]
            newargs = meta[1] or ()
            if isinstance(klass, tuple):
                klass = find_global(*klass)
        else:
            klass = meta
            newargs = ()

        if klass in _unresolvable:
            return None

        newstate = unpickler.load()
        inst = klass.__new__(klass, *newargs)

        try:
            resolve = inst._p_resolveConflict
        except AttributeError:
            _unresolvable[klass] = 1
            return None

        old = state(self, oid, oldSerial, prfactory)
        committed = state(self, oid, committedSerial, prfactory, committedData)

        resolved = resolve(old, committed, newstate)

        file = StringIO()
        pickler = Pickler(file, 1)
        pickler.persistent_id = persistent_id
        pickler.dump(meta)
        pickler.dump(resolved)
        return file.getvalue(1)
    except (ConflictError, BadClassName):
        return None
    except:
        # If anything else went wrong, catch it here and avoid passing an
        # arbitrary exception back to the client. The error here will mask
        # the original ConflictError. A client can recover from a
        # ConflictError, but not necessarily from other errors. But log
        # the error so that any problems can be fixed.
        logger.error("Unexpected error", exc_info=True)
        return None

class ConflictResolvingStorage:
    "Mix-in class that provides conflict resolution handling for storages"

    tryToResolveConflict = tryToResolveConflict
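
The hook that tryToResolveConflict() looks up lives on the persistent class itself. A hedged sketch of that contract (the Counter class is illustrative, not part of ZODB): the three arguments are state dictionaries as produced by __getstate__(), and the return value is the merged state.

from persistent import Persistent

class Counter(Persistent):
    """Illustrative counter whose concurrent increments merge cleanly."""

    value = 0

    def inc(self, n=1):
        self.value += n

    def _p_resolveConflict(self, oldState, savedState, newState):
        # Apply both writers' deltas relative to the common ancestor state.
        resolved = dict(newState)
        resolved['value'] = (savedState.get('value', 0)
                             + newState.get('value', 0)
                             - oldState.get('value', 0))
        return resolved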
BTrees
ZConfig
persistent
transaction
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Support for database export and import."""
from cStringIO import StringIO
from cPickle import Pickler, Unpickler
from tempfile import TemporaryFile
import logging
from ZODB.POSException import ExportError
from ZODB.utils import p64, u64
from ZODB.serialize import referencesf
import sys
logger = logging.getLogger('ZODB.ExportImport')
class ExportImport:

    def exportFile(self, oid, f=None):
        if f is None:
            f = TemporaryFile()
        elif isinstance(f, str):
            f = open(f, 'w+b')
        f.write('ZEXP')
        oids = [oid]
        done_oids = {}
        done = done_oids.has_key
        load = self._storage.load
        while oids:
            oid = oids.pop(0)
            if oid in done_oids:
                continue
            done_oids[oid] = True
            try:
                p, serial = load(oid, self._version)
            except:
                logger.debug("broken reference for oid %s", repr(oid),
                             exc_info=True)
            else:
                referencesf(p, oids)
                f.writelines([oid, p64(len(p)), p])
        f.write(export_end_marker)
        return f

    def importFile(self, f, clue='', customImporters=None):
        # This is tricky, because we need to work in a transaction!

        if isinstance(f, str):
            f = open(f, 'rb')

        magic = f.read(4)
        if magic != 'ZEXP':
            if customImporters and customImporters.has_key(magic):
                f.seek(0)
                return customImporters[magic](self, f, clue)
            raise ExportError("Invalid export header")

        t = self._txn_mgr.get()
        if clue:
            t.note(clue)

        return_oid_list = []
        self._import = f, return_oid_list
        self._register()
        t.commit(1)
        # Return the root imported object.
        if return_oid_list:
            return self.get(return_oid_list[0])
        else:
            return None

    def _importDuringCommit(self, transaction, f, return_oid_list):
        """Import data during two-phase commit.

        Invoked by the transaction manager mid commit.
        Appends one item, the OID of the first object created,
        to return_oid_list.
        """
        oids = {}

        def persistent_load(ooid):
            """Remap a persistent id to a new ID and create a ghost for it."""

            klass = None
            if isinstance(ooid, tuple):
                ooid, klass = ooid

            if ooid in oids:
                oid = oids[ooid]
            else:
                if klass is None:
                    oid = self._storage.new_oid()
                else:
                    oid = self._storage.new_oid(), klass
                oids[ooid] = oid

            return Ghost(oid)

        version = self._version

        while 1:
            h = f.read(16)
            if h == export_end_marker:
                break
            if len(h) != 16:
                raise ExportError("Truncated export file")
            l = u64(h[8:16])
            p = f.read(l)
            if len(p) != l:
                raise ExportError("Truncated export file")

            ooid = h[:8]
            if oids:
                oid = oids[ooid]
                if isinstance(oid, tuple):
                    oid = oid[0]
            else:
                oids[ooid] = oid = self._storage.new_oid()
                return_oid_list.append(oid)

            pfile = StringIO(p)
            unpickler = Unpickler(pfile)
            unpickler.persistent_load = persistent_load

            newp = StringIO()
            pickler = Pickler(newp, 1)
            pickler.persistent_id = persistent_id

            pickler.dump(unpickler.load())
            pickler.dump(unpickler.load())
            p = newp.getvalue()

            self._storage.store(oid, None, p, version, transaction)


export_end_marker = '\377'*16

class Ghost(object):
    __slots__ = ("oid",)

    def __init__(self, oid):
        self.oid = oid

def persistent_id(obj):
    if isinstance(obj, Ghost):
        return obj.oid
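
ExportImport is mixed into Connection, so these methods are normally reached through an open connection. A hedged round-trip sketch; `db`, the 'mydata' key, and the .zexp path are all illustrative.

import transaction

conn = db.open()
obj = conn.root()['mydata']
conn.exportFile(obj._p_oid, 'mydata.zexp')   # walk references, write a ZEXP file

copy = conn.importFile('mydata.zexp')        # re-created under fresh oids
conn.root()['mydata-copy'] = copy
transaction.commit()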
# this is a package
from ZODB.FileStorage.FileStorage \
     import FileStorage, RecordIterator, FileIterator, Record, packed_version
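
Since the package re-exports these names, client code opens a file storage with the short spelling. A minimal hedged sketch; 'Data.fs' is an illustrative path.

from ZODB.FileStorage import FileStorage

fs = FileStorage('Data.fs')   # opens (or creates) the append-only data file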
from ZODB.FileStorage import FileIterator
from ZODB.FileStorage.format \
     import TRANS_HDR, TRANS_HDR_LEN, DATA_HDR, DATA_HDR_LEN
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64
from ZODB.tests.StorageTestBase import zodb_unpickle

from cPickle import Unpickler
from cStringIO import StringIO
import md5
import struct
import types

def get_pickle_metadata(data):
    # ZODB's data records contain two pickles. The first is the class
    # of the object, the second is the object.
    if data.startswith('(c'):
        # Don't actually unpickle a class, because it will attempt to
        # load the class. Just break open the pickle and get the
        # module and class from it.
        modname, classname, rest = data.split('\n', 2)
        modname = modname[2:]
        return modname, classname
    f = StringIO(data)
    u = Unpickler(f)
    try:
        class_info = u.load()
    except Exception, err:
        print "Error", err
        return '', ''
    if isinstance(class_info, types.TupleType):
        if isinstance(class_info[0], types.TupleType):
            modname, classname = class_info[0]
        else:
            modname, classname = class_info
    else:
        # XXX not sure what to do here
        modname = repr(class_info)
        classname = ''
    return modname, classname

def fsdump(path, file=None, with_offset=1):
    i = 0
    iter = FileIterator(path)
    for trans in iter:
        if with_offset:
            print >> file, "Trans #%05d tid=%016x time=%s offset=%d" % \
                  (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._pos)
        else:
            print >> file, "Trans #%05d tid=%016x time=%s" % \
                  (i, u64(trans.tid), str(TimeStamp(trans.tid)))
        print >> file, "\tstatus=%s user=%s description=%s" % \
              (`trans.status`, trans.user, trans.description)
        j = 0
        for rec in trans:
            if rec.data is None:
                fullclass = "undo or abort of object creation"
            else:
                modname, classname = get_pickle_metadata(rec.data)
                dig = md5.new(rec.data).hexdigest()
                fullclass = "%s.%s" % (modname, classname)
            # special case for testing purposes
            if fullclass == "ZODB.tests.MinPO.MinPO":
                obj = zodb_unpickle(rec.data)
                fullclass = "%s %s" % (fullclass, obj.value)
            if rec.version:
                version = "version=%s " % rec.version
            else:
                version = ''
            if rec.data_txn:
                # XXX It would be nice to print the transaction number
                # (i) but it would be too expensive to keep track of.
                bp = "bp=%016x" % u64(rec.data_txn)
            else:
                bp = ""
            print >> file, " data #%05d oid=%016x %sclass=%s %s" % \
                  (j, u64(rec.oid), version, fullclass, bp)
            j += 1
        print >> file
        i += 1
    iter.close()
def fmt(p64):
    # Return a nicely formatted string for a packed 64-bit value
    return "%016x" % u64(p64)

class Dumper:
    """A very verbose dumper for debugging FileStorage problems."""
    # XXX Should revise this class to use FileStorageFormatter.

    def __init__(self, path, dest=None):
        self.file = open(path, "rb")
        self.dest = dest

    def dump(self):
        fid = self.file.read(4)
        print >> self.dest, "*" * 60
        print >> self.dest, "file identifier: %r" % fid
        while self.dump_txn():
            pass

    def dump_txn(self):
        pos = self.file.tell()
        h = self.file.read(TRANS_HDR_LEN)
        if not h:
            return False
        tid, tlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
        end = pos + tlen
        print >> self.dest, "=" * 60
        print >> self.dest, "offset: %d" % pos
        print >> self.dest, "end pos: %d" % end
        print >> self.dest, "transaction id: %s" % fmt(tid)
        print >> self.dest, "trec len: %d" % tlen
        print >> self.dest, "status: %r" % status
        user = descr = extra = ""
        if ul:
            user = self.file.read(ul)
        if dl:
            descr = self.file.read(dl)
        if el:
            extra = self.file.read(el)
        print >> self.dest, "user: %r" % user
        print >> self.dest, "description: %r" % descr
        print >> self.dest, "len(extra): %d" % el
        while self.file.tell() < end:
            self.dump_data(pos)
        stlen = self.file.read(8)
        print >> self.dest, "redundant trec len: %d" % u64(stlen)
        return 1

    def dump_data(self, tloc):
        pos = self.file.tell()
        h = self.file.read(DATA_HDR_LEN)
        assert len(h) == DATA_HDR_LEN
        oid, revid, prev, tloc, vlen, dlen = struct.unpack(DATA_HDR, h)
        print >> self.dest, "-" * 60
        print >> self.dest, "offset: %d" % pos
        print >> self.dest, "oid: %s" % fmt(oid)
        print >> self.dest, "revid: %s" % fmt(revid)
        print >> self.dest, "previous record offset: %d" % prev
        print >> self.dest, "transaction offset: %d" % tloc
        if vlen:
            pnv = self.file.read(8)
            sprevdata = self.file.read(8)
            version = self.file.read(vlen)
            print >> self.dest, "version: %r" % version
            print >> self.dest, "non-version data offset: %d" % u64(pnv)
            print >> self.dest, \
                  "previous version data offset: %d" % u64(sprevdata)
        print >> self.dest, "len(data): %d" % dlen
        self.file.read(dlen)
        if not dlen:
            sbp = self.file.read(8)
            print >> self.dest, "backpointer: %d" % u64(sbp)
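
A hedged sketch of driving the two entry points above; 'Data.fs' is an illustrative path and both helpers write to stdout here.

import sys

fsdump('Data.fs', file=sys.stdout)          # one summary line per transaction and record
Dumper('Data.fs', dest=sys.stdout).dump()   # raw header-by-header detail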
##############################################################################
#
# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Very Simple Mapping ZODB storage

The Mapping storage provides an extremely simple storage implementation that
doesn't provide undo or version support.

It is meant to illustrate the simplest possible storage.

The Mapping storage uses a single data structure to map object ids to data.
"""

__version__='$Revision: 1.14 $'[11:-2]

from ZODB import utils
from ZODB import BaseStorage
from ZODB import POSException
from persistent.TimeStamp import TimeStamp


class MappingStorage(BaseStorage.BaseStorage):

    def __init__(self, name='Mapping Storage'):
        BaseStorage.BaseStorage.__init__(self, name)
        self._index = {}
        self._tindex = []
        self._ltid = None
        # Note: If you subclass this and use a persistent mapping facility
        # (e.g. a dbm file), you will need to get the maximum key and save it
        # as self._oid. See dbmStorage.

    def __len__(self):
        return len(self._index)

    def getSize(self):
        # These constants are for Python object memory overheads
        s = 32
        for oid in self._index.keys():
            p = self._index[oid]
            s += 56 + len(p)
        return s

    def load(self, oid, version):
        self._lock_acquire()
        try:
            p = self._index[oid]
            return p[8:], p[:8]  # pickle, serial
        finally:
            self._lock_release()

    def loadEx(self, oid, version):
        self._lock_acquire()
        try:
            # Since this storage doesn't support versions, tid and
            # serial will always be the same.
            p = self._index[oid]
            return p[8:], p[:8], ""  # pickle, tid, version
        finally:
            self._lock_release()

    def getTid(self, oid):
        self._lock_acquire()
        try:
            # The tid is the first 8 bytes of the buffer.
            s = self._index[oid]
            return s[:8]
        finally:
            self._lock_release()

    def store(self, oid, serial, data, version, transaction):
        if transaction is not self._transaction:
            raise POSException.StorageTransactionError(self, transaction)

        if version:
            raise POSException.Unsupported, "Versions aren't supported"

        self._lock_acquire()
        try:
            if self._index.has_key(oid):
                old = self._index[oid]
                oserial = old[:8]
                if serial != oserial:
                    raise POSException.ConflictError(oid=oid,
                                                     serials=(oserial, serial),
                                                     data=data)

            self._tindex.append((oid, self._tid + data))
        finally:
            self._lock_release()
        return self._tid

    def _clear_temp(self):
        self._tindex = []

    def _finish(self, tid, user, desc, ext):
        for oid, p in self._tindex:
            self._index[oid] = p
        self._ltid = self._tid

    def lastTransaction(self):
        return self._ltid

    def pack(self, t, referencesf):
        self._lock_acquire()
        try:
            if not self._index:
                return
            # Build an index of *only* those objects reachable from the root.
            rootl = ['\0\0\0\0\0\0\0\0']
            pindex = {}
            while rootl:
                oid = rootl.pop()
                if pindex.has_key(oid):
                    continue
                # Scan non-version pickle for references
                r = self._index[oid]
                pindex[oid] = r
                p = r[8:]
                referencesf(p, rootl)

            # Now delete any unreferenced entries:
            for oid in self._index.keys():
                if not pindex.has_key(oid):
                    del self._index[oid]
        finally:
            self._lock_release()

    def _splat(self):
        """Spit out a string showing state."""
        o = []
        o.append('Index:')
        keys = self._index.keys()
        keys.sort()
        for oid in keys:
            r = self._index[oid]
            o.append('  %s: %s, %s' %
                     (utils.u64(oid), TimeStamp(r[:8]), `r[8:]`))
        return '\n'.join(o)
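
A hedged sketch of plugging the storage into a database; everything lives in memory, so the contents vanish when the process exits.

import transaction
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage())
conn = db.open()
conn.root()['x'] = 1
transaction.commit()
conn.close()
db.close()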
# Extension information for zpkg.
<extension winlock>
source winlock.c
</extension>
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Provide backward compatibility with storages that only have undoLog()."""

class UndoLogCompatible:

    def undoInfo(self, first=0, last=-20, specification=None):
        if specification:
            def filter(desc, spec=specification.items()):
                get = desc.get
                for k, v in spec:
                    if get(k, None) != v:
                        return 0
                return 1
        else:
            filter = None

        return self.undoLog(first, last, filter)
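
A hedged sketch of the filtering path: `storage` is assumed to be an undo-capable storage (FileStorage, say), and the 'alice' value is made up. Each record is the dictionary produced by undoLog(), so the specification keys must match its keys.

for rec in storage.undoInfo(0, -20, specification={'user_name': 'alice'}):
    print rec['id'], rec['time'], rec['description']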
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Implement an bobo_application object that is BoboPOS3 aware
This module provides a wrapper that causes a database connection to be created
and used when bobo publishes a bobo_application object.
"""
__version__='$Revision: 1.14 $'[11:-2]
import transaction
connection_open_hooks = []
class ZApplicationWrapper:
def __init__(self, db, name, klass= None, klass_args= (),
version_cookie_name=None):
self._stuff = db, name, version_cookie_name
if klass is not None:
conn=db.open()
root=conn.root()
if not root.has_key(name):
root[name]=klass()
transaction.commit()
conn.close()
self._klass=klass
# This hack is to overcome a bug in Bobo!
def __getattr__(self, name):
return getattr(self._klass, name)
def __bobo_traverse__(self, REQUEST=None, name=None):
db, aname, version_support = self._stuff
if version_support is not None and REQUEST is not None:
version=REQUEST.get(version_support,'')
else: version=''
conn=db.open(version)
if connection_open_hooks:
for hook in connection_open_hooks:
hook(conn)
# arrange for the connection to be closed when the request goes away
cleanup = Cleanup(conn)
REQUEST._hold(cleanup)
conn.setDebugInfo(REQUEST.environ, REQUEST.other)
v=conn.root()[aname]
if name is not None:
if hasattr(v, '__bobo_traverse__'):
return v.__bobo_traverse__(REQUEST, name)
if hasattr(v,name): return getattr(v,name)
return v[name]
return v
def __call__(self, connection=None):
db, aname, version_support = self._stuff
if connection is None:
connection=db.open()
elif isinstance(type, basestring):
connection=db.open(connection)
return connection.root()[aname]
class Cleanup:
def __init__(self, jar):
self._jar = jar
def __del__(self):
self._jar.close()
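
A hedged sketch of wiring the wrapper up outside of Bobo publishing; PersistentMapping merely stands in for a real application class.

from persistent.mapping import PersistentMapping
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage())
bobo_application = ZApplicationWrapper(db, 'Application',
                                       klass=PersistentMapping)
app = bobo_application()   # opens a connection and returns the stored root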