Commit c65e3621 authored by Hanno Schlichting's avatar Hanno Schlichting

Added date range index improvements from ``experimental.catalogqueryplan``.

parent d13286d7
...@@ -26,6 +26,8 @@ Bugs Fixed ...@@ -26,6 +26,8 @@ Bugs Fixed
Features Added Features Added
++++++++++++++ ++++++++++++++
- Added date range index improvements from ``experimental.catalogqueryplan``.
- Changed policy on handling exceptions during ZCML parsing in ``Products``. - Changed policy on handling exceptions during ZCML parsing in ``Products``.
We no longer catch any exceptions in non-debug mode. We no longer catch any exceptions in non-debug mode.
......
...@@ -20,10 +20,15 @@ from AccessControl.class_init import InitializeClass ...@@ -20,10 +20,15 @@ from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import manage_zcatalog_indexes from AccessControl.Permissions import manage_zcatalog_indexes
from AccessControl.Permissions import view from AccessControl.Permissions import view
from AccessControl.SecurityInfo import ClassSecurityInfo from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import aq_base
from Acquisition import aq_get
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.Common import package_home from App.Common import package_home
from App.special_dtml import DTMLFile from App.special_dtml import DTMLFile
from BTrees.IIBTree import IISet from BTrees.IIBTree import IISet
from BTrees.IIBTree import IITreeSet from BTrees.IIBTree import IITreeSet
from BTrees.IIBTree import difference
from BTrees.IIBTree import intersection from BTrees.IIBTree import intersection
from BTrees.IIBTree import multiunion from BTrees.IIBTree import multiunion
from BTrees.IOBTree import IOBTree from BTrees.IOBTree import IOBTree
...@@ -40,6 +45,12 @@ _dtmldir = os.path.join( package_home( globals() ), 'dtml' ) ...@@ -40,6 +45,12 @@ _dtmldir = os.path.join( package_home( globals() ), 'dtml' )
MAX32 = int(2**31 - 1) MAX32 = int(2**31 - 1)
class RequestCache(dict):
    """Simple per-request cache, a plain dict with a debugging-friendly repr.

    Stored in the REQUEST under a per-catalog key so cached date range
    query results are scoped to one catalog state within one request.
    """

    def __str__(self):
        # Show how many entries are cached, useful when logging the REQUEST.
        item_count = len(self)
        return "<RequestCache %s items>" % item_count
class DateRangeIndex(UnIndex): class DateRangeIndex(UnIndex):
"""Index for date ranges, such as the "effective-expiration" range in CMF. """Index for date ranges, such as the "effective-expiration" range in CMF.
...@@ -240,6 +251,13 @@ class DateRangeIndex(UnIndex): ...@@ -240,6 +251,13 @@ class DateRangeIndex(UnIndex):
return tuple( result ) return tuple( result )
def _cache_key(self, catalog):
    """Build the REQUEST-cache key for ``catalog``.

    The key is the catalog id; when the (unwrapped) catalog exposes a
    ``getCounter`` method its current value is appended, so the key is
    automatically invalidated whenever the catalog changes.
    """
    key = catalog.getId()
    # Look on the unwrapped object so acquisition cannot supply the method.
    get_counter = getattr(aq_base(catalog), 'getCounter', None)
    if get_counter is None:
        return key
    return '%s_%s' % (key, get_counter())
def _apply_index(self, request, resultset=None): def _apply_index(self, request, resultset=None):
""" """
Apply the index to query parameters given in 'request', which Apply the index to query parameters given in 'request', which
...@@ -253,33 +271,71 @@ class DateRangeIndex(UnIndex): ...@@ -253,33 +271,71 @@ class DateRangeIndex(UnIndex):
second object is a tuple containing the names of all data fields second object is a tuple containing the names of all data fields
used. used.
""" """
record = parseIndexRequest(request, self.id, self.query_options) iid = self.id
record = parseIndexRequest(request, iid, self.query_options)
if record.keys is None: if record.keys is None:
return None return None
term = self._convertDateTime( record.keys[0] ) term = self._convertDateTime(record.keys[0])
# REQUEST = aq_get(self, 'REQUEST', None)
# Aggregate sets for each bucket separately, to avoid if REQUEST is not None:
# large-small union penalties. catalog = aq_parent(aq_parent(aq_inner(self)))
# if catalog is not None:
until_only = multiunion( self._until_only.values( term ) ) key = self._cache_key(catalog)
since_only = multiunion( self._since_only.values( None, term ) ) cache = REQUEST.get(key, None)
until = multiunion( self._until.values( term ) ) tid = term / 10
if resultset is None:
# Total result is bound by resultset cachekey = '_daterangeindex_%s_%s' % (iid, tid)
until = intersection(resultset, until) else:
since = multiunion(self._since.values(None, term)) cachekey = '_daterangeindex_inverse_%s_%s' % (iid, tid)
bounded = intersection(until, since) if cache is None:
cache = REQUEST[key] = RequestCache()
else:
cached = cache.get(cachekey, None)
if cached is not None:
if resultset is None:
return (cached,
(self._since_field, self._until_field))
else:
return (difference(resultset, cached),
(self._since_field, self._until_field))
if resultset is None:
# Aggregate sets for each bucket separately, to avoid
# large-small union penalties.
until_only = multiunion(self._until_only.values(term))
since_only = multiunion(self._since_only.values(None, term))
until = multiunion(self._until.values(term))
# Total result is bound by resultset
if REQUEST is None:
until = intersection(resultset, until)
since = multiunion(self._since.values(None, term))
bounded = intersection(until, since)
# Merge from smallest to largest.
result = multiunion([bounded, until_only, since_only,
self._always])
if REQUEST is not None and catalog is not None:
cache[cachekey] = result
return (result, (self._since_field, self._until_field))
else:
# Compute the inverse and subtract from res
until_only = multiunion(self._until_only.values(None, term))
since_only = multiunion(self._since_only.values(term))
until = multiunion(self._until.values(None, term))
since = multiunion(self._since.values(term))
# Merge from smallest to largest. result = multiunion([until_only, since_only, until,since])
result = multiunion([bounded, until_only, since_only, self._always]) if REQUEST is not None and catalog is not None:
cache[cachekey] = result
return result, ( self._since_field, self._until_field ) return (difference(resultset, result),
(self._since_field, self._until_field))
#
# Helper functions.
#
def _insertForwardIndexEntry( self, since, until, documentId ): def _insertForwardIndexEntry( self, since, until, documentId ):
""" """
Insert 'documentId' into the appropriate set based on Insert 'documentId' into the appropriate set based on
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment