Commit 3088f386 authored by Nicolas Delaby's avatar Nicolas Delaby

CachePlugins implement the ICachePlugin interface

git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk@27013 20353a03-c40f-0410-a6d1-a30d3c3de9de
parent 1d692378
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
......@@ -33,6 +34,8 @@ from thread import get_ident
from zLOG import LOG
from BaseCache import BaseCache
from BaseCache import CacheEntry
from Products.ERP5Type import Interface
import zope.interface
try:
import memcache
......@@ -42,28 +45,32 @@ except ImportError:
## global ditionary containing connection objects
connection_pool = {}
class DistributedRamCache(BaseCache):
""" Memcached based cache plugin. """
def __init__(self, params):
zope.interface.implements(
Interface.ICachePlugin
)
def __init__(self, params={}):
self._servers = params.get('server', '')
self._debugLevel = params.get('debugLevel', 0)
BaseCache.__init__(self)
def initCacheStorage(self):
  """Initialise the cache backend.

  Nothing to do here: the backend is an external memcached server,
  which manages its own storage.
  """
  pass
def getCacheStorage(self):
def getCacheStorage(self, **kw):
## if we use one connection object this causes
## "MemCached: while expecting 'STORED', got unexpected response 'END'"
## messages in log files and can sometimes can block the thread.
## For the moment we create a new conn object for every thread.
global connection_pool
thread_id = get_ident()
memcache_conn = connection_pool.get(thread_id, None)
if memcache_conn is None:
## we don't have memcache_conn for this thread
......@@ -73,21 +80,21 @@ class DistributedRamCache(BaseCache):
else:
## we have memcache_conn for this thread
return memcache_conn
def checkAndFixCacheId(self, cache_id, scope):
  """Build the real memcached key for (cache_id, scope).

  memcached has no notion of namespaces (cache scopes), so the scope is
  emulated by prefixing it to the cache id.
  """
  prefixed_id = "%s.%s" %(scope, cache_id)
  ## memcached rejects keys longer than MEMCACHED_SERVER_MAX_KEY_LENGTH,
  ## so the key is truncated to that limit.
  return prefixed_id[:MEMCACHED_SERVER_MAX_KEY_LENGTH]
def get(self, cache_id, scope, default=None):
  """Return the cache entry stored under (cache_id, scope).

  Returns *default* on a cache miss.  The original ignored the
  *default* argument: memcache's Client.get() returns None on a miss,
  which was passed straight back to the caller.  Returning *default*
  instead honours the declared signature and stays backward compatible
  (default is None).
  """
  cache_storage = self.getCacheStorage()
  cache_id = self.checkAndFixCacheId(cache_id, scope)
  cache_entry = cache_storage.get(cache_id)
  ## NOTE(review): this counts every lookup as a hit, even a miss —
  ## kept as-is to preserve the existing statistics behaviour.
  self.markCacheHit()
  if cache_entry is None:
    return default
  return cache_entry
def set(self, cache_id, scope, value, cache_duration= None, calculation_time=0):
cache_storage = self.getCacheStorage()
cache_id = self.checkAndFixCacheId(cache_id, scope)
......@@ -97,8 +104,8 @@ class DistributedRamCache(BaseCache):
cache_duration = 86400
cache_entry = CacheEntry(value, cache_duration, calculation_time)
cache_storage.set(cache_id, cache_entry, cache_duration)
#self.markCacheMiss()
self.markCacheMiss()
def expireOldCacheEntries(self, forceCheck = False):
""" Memcache has its own built in expire policy """
## we can not use one connection to memcached server for time being of DistributedRamCache
......@@ -108,13 +115,13 @@ class DistributedRamCache(BaseCache):
## but that's too much overhead or create a new connection when cache is to be expired.
## This way we can catch memcached server failures. BTW: This hack is forced by the lack functionality in python-memcached
#self._cache = memcache.Client(self._servers.split('\n'), debug=self._debugLevel)
pass
pass
def delete(self, cache_id, scope):
  """Remove the entry stored under (cache_id, scope) from memcached."""
  storage = self.getCacheStorage()
  key = self.checkAndFixCacheId(cache_id, scope)
  storage.delete(key)
def has_key(self, cache_id, scope):
if self.get(cache_id, scope):
return True
......@@ -124,7 +131,7 @@ class DistributedRamCache(BaseCache):
def getScopeList(self):
  """Return the list of cache scopes.

  memcached supports neither namespaces (cache scopes) nor key
  enumeration, so an empty list is always returned.
  """
  return []
def getScopeKeyList(self, scope):
  """Return the cache ids stored under *scope*.

  memcached supports neither namespaces (cache scopes) nor key
  enumeration, so an empty list is always returned.
  """
  return []
......@@ -133,12 +140,12 @@ class DistributedRamCache(BaseCache):
BaseCache.clearCache(self)
cache_storage = self.getCacheStorage()
cache_storage.flush_all()
def clearCacheForScope(self, scope):
  """Clear all entries belonging to *scope*.

  memcached supports neither namespaces (cache scopes) nor key
  enumeration, so there is no way to delete only one scope's entries:
  the whole cache has to be flushed instead.
  """
  ## Fixed: the original called self.clearCache() twice (a duplicated
  ## line); one flush is sufficient.
  self.clearCache()
def getCachePluginTotalMemorySize(self):
""" Calculate total RAM memory size of cache plugin. """
......
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
......@@ -32,6 +33,8 @@ Local RAM based cache plugin.
import time
from BaseCache import BaseCache, CacheEntry
from Products.ERP5Type import Interface
import zope.interface
def calcPythonObjectMemorySize(i):
""" Recursive function that will 'walk' over complex python types and caclulate
......@@ -48,7 +51,11 @@ def calcPythonObjectMemorySize(i):
class RamCache(BaseCache):
""" RAM based cache plugin."""
zope.interface.implements(
Interface.ICachePlugin
)
_cache_dict = {}
cache_expire_check_interval = 300
......@@ -60,7 +67,7 @@ class RamCache(BaseCache):
## cache storage is a RAM based dictionary
pass
def getCacheStorage(self):
def getCacheStorage(self, **kw):
return self._cache_dict
def get(self, cache_id, scope, default=None):
......@@ -110,18 +117,18 @@ class RamCache(BaseCache):
for scope, cache_id in self.getCacheStorage().iterkeys():
scope_set.add(scope)
return list(scope_set)
def getScopeKeyList(self, scope):
  """Return the cache ids stored under *scope*.

  Keys in the RAM cache dictionary are (scope, cache_id) tuples; collect
  the cache_id of every key whose scope matches.
  """
  return [key[1]
          for key in self.getCacheStorage().iterkeys()
          if key[0] == scope]
def clearCache(self):
  ## Reset the base-class bookkeeping (hit/miss statistics) first,
  ## then drop every entry from the underlying cache dictionary.
  BaseCache.clearCache(self)
  self.getCacheStorage().clear()
def clearCacheForScope(self, scope):
cache = self.getCacheStorage()
for key in cache.keys():
......@@ -131,7 +138,7 @@ class RamCache(BaseCache):
except KeyError:
# The key might have disappeared, due to multi-threading.
pass
def getCachePluginTotalMemorySize(self):
""" Calculate total RAM memory size of cache plugin.
This function depends on mxBase python module:
......
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
......@@ -35,6 +36,8 @@ import time
import base64
from zLOG import LOG
from BaseCache import BaseCache, CacheEntry, CachedMethodError
from Products.ERP5Type import Interface
import zope.interface
try:
import cPickle as pickle
......@@ -58,7 +61,11 @@ connection_pool = {}
class SQLCache(BaseCache):
""" SQL based cache plugin. """
zope.interface.implements(
Interface.ICachePlugin
)
cache_expire_check_interval = 3600
create_table_sql = '''CREATE TABLE %s(cache_id VARBINARY(970) NOT NULL,
......@@ -115,7 +122,7 @@ class SQLCache(BaseCache):
find_table_by_name_sql = """SHOW TABLES LIKE '%s' """
def __init__(self, params):
def __init__(self, params={}):
BaseCache.__init__(self)
self._dbConn = None
self._db_server = params.get('server', '')
......@@ -139,12 +146,13 @@ class SQLCache(BaseCache):
## no such table create it
self.execSQLQuery(self.create_table_sql %self._db_cache_table_name)
def getCacheStorage(self, force_reconnect=False):
def getCacheStorage(self, **kw):
"""
Return current DB connection or create a new one for this thread.
See http://sourceforge.net/docman/display_doc.php?docid=32071&group_id=22307
especially threadsafety part why we create for every thread a new MySQL db connection object.
"""
force_reconnect = kw.get('force_reconnect', False)
global connection_pool
thread_id = get_ident()
......
......@@ -33,12 +33,18 @@ ZODB Based cache plugin.
import time
from BaseCache import BaseCache, CacheEntry
from BTrees.OOBTree import OOBTree
from Products.ERP5Type import Interface
import zope.interface
PRIVATE_ATTRIBUTE_ZODB_CACHE_NAME = '_zodb_cache'
class ZODBCache(BaseCache):
""" ZODB based cache plugin."""
zope.interface.implements(
Interface.ICachePlugin
)
cache_tool = None
cache_expire_check_interval = 300
......@@ -55,7 +61,7 @@ class ZODBCache(BaseCache):
if getattr(self.cache_tool, PRIVATE_ATTRIBUTE_ZODB_CACHE_NAME, None) is None:
self.cache_tool._zodb_cache = OOBTree()
def getCacheStorage(self):
def getCacheStorage(self, **kw):
return getattr(self.cache_tool, PRIVATE_ATTRIBUTE_ZODB_CACHE_NAME)
def get(self, cache_id, scope, default=None):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment