Commit 3088f386 authored by Nicolas Delaby

CachePlugins implement the ICachePlugin Interface

git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk@27013 20353a03-c40f-0410-a6d1-a30d3c3de9de
parent 1d692378
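The change is the same in all four plugins: each cache plugin class now declares at class level that it provides the ICachePlugin interface from Products.ERP5Type.Interface. The following standalone sketch is illustrative only and not part of the commit (IMinimalCachePlugin and DummyCachePlugin are hypothetical names); it shows the Python 2 declaration pattern being applied and how such a declaration can be checked with zope.interface.verify:

import zope.interface
from zope.interface.verify import verifyClass

class IMinimalCachePlugin(zope.interface.Interface):
  # Hypothetical, reduced stand-in for Products.ERP5Type.Interface.ICachePlugin.
  def get(cache_id, scope, default=None):
    """Return the cache entry stored under cache_id in the given scope."""
  def set(cache_id, scope, value, cache_duration=None, calculation_time=0):
    """Store value under cache_id in the given scope."""

class DummyCachePlugin:
  # Same class-level declaration style as the one added by this commit.
  zope.interface.implements(IMinimalCachePlugin)

  def __init__(self, params={}):
    self._storage = {}

  def get(self, cache_id, scope, default=None):
    return self._storage.get((scope, cache_id), default)

  def set(self, cache_id, scope, value, cache_duration=None, calculation_time=0):
    self._storage[(scope, cache_id)] = value

# Raises an exception if DummyCachePlugin does not provide the declared methods.
verifyClass(IMinimalCachePlugin, DummyCachePlugin)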
+# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
@@ -33,6 +34,8 @@ from thread import get_ident
from zLOG import LOG
from BaseCache import BaseCache
from BaseCache import CacheEntry
+from Products.ERP5Type import Interface
+import zope.interface

try:
  import memcache
@@ -42,28 +45,32 @@ except ImportError:

## global dictionary containing connection objects
connection_pool = {}

class DistributedRamCache(BaseCache):
  """ Memcached based cache plugin. """

+  zope.interface.implements(
+      Interface.ICachePlugin
+  )
+
-  def __init__(self, params):
+  def __init__(self, params={}):
    self._servers = params.get('server', '')
    self._debugLevel = params.get('debugLevel', 0)
    BaseCache.__init__(self)

  def initCacheStorage(self):
    """ Init cache storage """
    ## cache storage is a memcached server, no need to init it
    pass

-  def getCacheStorage(self):
+  def getCacheStorage(self, **kw):
    ## if we use one connection object this causes
    ## "MemCached: while expecting 'STORED', got unexpected response 'END'"
    ## messages in log files and can sometimes block the thread.
    ## For the moment we create a new conn object for every thread.
    global connection_pool
    thread_id = get_ident()
    memcache_conn = connection_pool.get(thread_id, None)
    if memcache_conn is None:
      ## we don't have memcache_conn for this thread
@@ -73,21 +80,21 @@ class DistributedRamCache(BaseCache):
    else:
      ## we have memcache_conn for this thread
      return memcache_conn

  def checkAndFixCacheId(self, cache_id, scope):
    ## memcached doesn't support namespaces (cache scopes), so to "emulate"
    ## such behaviour when constructing cache_id we add the scope in front
    cache_id = "%s.%s" % (scope, cache_id)
    ## memcached will fail to store a cache_id longer than MEMCACHED_SERVER_MAX_KEY_LENGTH.
    return cache_id[:MEMCACHED_SERVER_MAX_KEY_LENGTH]

  def get(self, cache_id, scope, default=None):
    cache_storage = self.getCacheStorage()
    cache_id = self.checkAndFixCacheId(cache_id, scope)
    cache_entry = cache_storage.get(cache_id)
-    #self.markCacheHit()
+    self.markCacheHit()
    return cache_entry

  def set(self, cache_id, scope, value, cache_duration=None, calculation_time=0):
    cache_storage = self.getCacheStorage()
    cache_id = self.checkAndFixCacheId(cache_id, scope)
@@ -97,8 +104,8 @@ class DistributedRamCache(BaseCache):
      cache_duration = 86400
    cache_entry = CacheEntry(value, cache_duration, calculation_time)
    cache_storage.set(cache_id, cache_entry, cache_duration)
-    #self.markCacheMiss()
+    self.markCacheMiss()

  def expireOldCacheEntries(self, forceCheck=False):
    """ Memcache has its own built-in expire policy """
    ## we can not use one connection to the memcached server for the lifetime of DistributedRamCache
@@ -108,13 +115,13 @@ class DistributedRamCache(BaseCache):
    ## but that's too much overhead, or create a new connection when the cache is to be expired.
    ## This way we can catch memcached server failures. BTW: this hack is forced by the lack of functionality in python-memcached
    #self._cache = memcache.Client(self._servers.split('\n'), debug=self._debugLevel)
    pass

  def delete(self, cache_id, scope):
    cache_storage = self.getCacheStorage()
    cache_id = self.checkAndFixCacheId(cache_id, scope)
    cache_storage.delete(cache_id)

  def has_key(self, cache_id, scope):
    if self.get(cache_id, scope):
      return True
@@ -124,7 +131,7 @@ class DistributedRamCache(BaseCache):
  def getScopeList(self):
    ## memcached doesn't support namespaces (cache scopes) nor getting the cached key list
    return []

  def getScopeKeyList(self, scope):
    ## memcached doesn't support namespaces (cache scopes) nor getting the cached key list
    return []
@@ -133,12 +140,12 @@ class DistributedRamCache(BaseCache):
    BaseCache.clearCache(self)
    cache_storage = self.getCacheStorage()
    cache_storage.flush_all()

  def clearCacheForScope(self, scope):
    ## memcached doesn't support namespaces (cache scopes) nor getting the cached key list.
    ## Because this function was called explicitly instead of clearing a specific cache
    ## scope, we have no choice but to clear the whole cache.
    self.clearCache()

  def getCachePluginTotalMemorySize(self):
    """ Calculate total RAM memory size of cache plugin. """
...
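The comments in DistributedRamCache.getCacheStorage() above describe keeping one memcache client per thread, keyed by thread.get_ident(), in the module-level connection_pool dictionary, because sharing a single client across threads produced spurious "got unexpected response 'END'" errors. A minimal standalone sketch of that pooling pattern, assuming python-memcached is installed and a server address of 127.0.0.1:11211 (both assumptions, not taken from the diff):

from thread import get_ident
import memcache

connection_pool = {}

def get_thread_local_client(servers=('127.0.0.1:11211',), debug=0):
  # Reuse the client already created by this thread, if any.
  thread_id = get_ident()
  client = connection_pool.get(thread_id, None)
  if client is None:
    # First use from this thread: create a dedicated client and pool it.
    client = memcache.Client(list(servers), debug=debug)
    connection_pool[thread_id] = client
  return client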
+# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
@@ -32,6 +33,8 @@ Local RAM based cache plugin.
import time
from BaseCache import BaseCache, CacheEntry
+from Products.ERP5Type import Interface
+import zope.interface

def calcPythonObjectMemorySize(i):
  """ Recursive function that will 'walk' over complex python types and calculate
@@ -48,7 +51,11 @@ def calcPythonObjectMemorySize(i):

class RamCache(BaseCache):
  """ RAM based cache plugin."""

+  zope.interface.implements(
+      Interface.ICachePlugin
+  )
+
  _cache_dict = {}
  cache_expire_check_interval = 300
@@ -60,7 +67,7 @@ class RamCache(BaseCache):
    ## cache storage is a RAM based dictionary
    pass

-  def getCacheStorage(self):
+  def getCacheStorage(self, **kw):
    return self._cache_dict

  def get(self, cache_id, scope, default=None):
@@ -110,18 +117,18 @@ class RamCache(BaseCache):
    for scope, cache_id in self.getCacheStorage().iterkeys():
      scope_set.add(scope)
    return list(scope_set)

  def getScopeKeyList(self, scope):
    key_list = []
    for key in self.getCacheStorage().iterkeys():
      if scope == key[0]:
        key_list.append(key[1])
    return key_list

  def clearCache(self):
    BaseCache.clearCache(self)
    self.getCacheStorage().clear()

  def clearCacheForScope(self, scope):
    cache = self.getCacheStorage()
    for key in cache.keys():
@@ -131,7 +138,7 @@ class RamCache(BaseCache):
      except KeyError:
        # The key might have disappeared, due to multi-threading.
        pass

  def getCachePluginTotalMemorySize(self):
    """ Calculate total RAM memory size of cache plugin.
    This function depends on mxBase python module:
...
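RamCache above stores everything in a plain class-level dictionary keyed by (scope, cache_id) tuples, which is what getScopeList() and getScopeKeyList() iterate over. A small illustration of that key layout with hypothetical scopes and values (not taken from ERP5):

cache_dict = {
  ('user_a', 'portal_type_list'): ['Person', 'Organisation'],
  ('user_a', 'skin_name'): 'View',
  ('user_b', 'portal_type_list'): ['Person'],
}

# Distinct scopes, as in getScopeList().
scope_list = list(set(scope for scope, cache_id in cache_dict.iterkeys()))

# Keys belonging to one scope, as in getScopeKeyList('user_a').
user_a_keys = [cache_id for scope, cache_id in cache_dict.iterkeys()
               if scope == 'user_a']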
+# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
@@ -35,6 +36,8 @@ import time
import base64
from zLOG import LOG
from BaseCache import BaseCache, CacheEntry, CachedMethodError
+from Products.ERP5Type import Interface
+import zope.interface

try:
  import cPickle as pickle
@@ -58,7 +61,11 @@ connection_pool = {}

class SQLCache(BaseCache):
  """ SQL based cache plugin. """

+  zope.interface.implements(
+      Interface.ICachePlugin
+  )
+
  cache_expire_check_interval = 3600

  create_table_sql = '''CREATE TABLE %s(cache_id VARBINARY(970) NOT NULL,
@@ -115,7 +122,7 @@ class SQLCache(BaseCache):
  find_table_by_name_sql = """SHOW TABLES LIKE '%s' """

-  def __init__(self, params):
+  def __init__(self, params={}):
    BaseCache.__init__(self)
    self._dbConn = None
    self._db_server = params.get('server', '')
@@ -139,12 +146,13 @@ class SQLCache(BaseCache):
      ## no such table, create it
      self.execSQLQuery(self.create_table_sql % self._db_cache_table_name)

-  def getCacheStorage(self, force_reconnect=False):
+  def getCacheStorage(self, **kw):
    """
    Return the current DB connection or create a new one for this thread.
    See http://sourceforge.net/docman/display_doc.php?docid=32071&group_id=22307
    especially the threadsafety part, which is why we create a new MySQL db connection object for every thread.
    """
+    force_reconnect = kw.get('force_reconnect', False)
    global connection_pool
    thread_id = get_ident()
...
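In SQLCache the explicit force_reconnect argument is folded into the generic **kw signature, so every plugin now exposes getCacheStorage(self, **kw) while SQLCache still honours force_reconnect when it is passed as a keyword. A reduced sketch of that calling convention (the class name and return value below are placeholders, not the real implementation):

class SketchSQLCache:
  def getCacheStorage(self, **kw):
    force_reconnect = kw.get('force_reconnect', False)
    if force_reconnect:
      # A real implementation would discard the pooled connection here.
      pass
    return 'db-connection-placeholder'

plugin = SketchSQLCache()
plugin.getCacheStorage()                      # shared, argument-free call
plugin.getCacheStorage(force_reconnect=True)  # plugin-specific keyword still works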
@@ -33,12 +33,18 @@ ZODB Based cache plugin.
import time
from BaseCache import BaseCache, CacheEntry
from BTrees.OOBTree import OOBTree
+from Products.ERP5Type import Interface
+import zope.interface

PRIVATE_ATTRIBUTE_ZODB_CACHE_NAME = '_zodb_cache'

class ZODBCache(BaseCache):
  """ ZODB based cache plugin."""

+  zope.interface.implements(
+      Interface.ICachePlugin
+  )
+
  cache_tool = None
  cache_expire_check_interval = 300
@@ -55,7 +61,7 @@ class ZODBCache(BaseCache):
    if getattr(self.cache_tool, PRIVATE_ATTRIBUTE_ZODB_CACHE_NAME, None) is None:
      self.cache_tool._zodb_cache = OOBTree()

-  def getCacheStorage(self):
+  def getCacheStorage(self, **kw):
    return getattr(self.cache_tool, PRIVATE_ATTRIBUTE_ZODB_CACHE_NAME)

  def get(self, cache_id, scope, default=None):
...