Commit ce6a27bc authored by Nicolas Delaby

add support for new memcached client parameters

git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk@27580 20353a03-c40f-0410-a6d1-a30d3c3de9de
parent af7ccf6f
@@ -39,7 +39,6 @@ import zope.interface
 try:
   import memcache
-  MEMCACHED_SERVER_MAX_KEY_LENGTH = memcache.SERVER_MAX_KEY_LENGTH
 except ImportError:
   LOG('DistributedRamCache', 0, 'unable to import memcache')
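
For reference, the module-level constant removed here is replaced by per-client
limits passed to memcache.Client (see the hunks below). A minimal standalone
sketch, assuming a python-memcached release that accepts these keyword
arguments; the server address is a placeholder:

    import memcache

    client = memcache.Client(['127.0.0.1:11211'],
                             debug=0,
                             server_max_key_length=250,
                             server_max_value_length=1024 * 1024)
    client.set('some_key', 'some_value')
    assert client.get('some_key') == 'some_value'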
@@ -55,7 +54,9 @@ class DistributedRamCache(BaseCache):
   def __init__(self, params={}):
     self._servers = params.get('server', '')
-    self._debugLevel = params.get('debugLevel', 0)
+    self._server_max_key_length = params.get('server_max_key_length', 250)
+    self._server_max_value_length = params.get('server_max_value_length', 1024*1024)
+    self._debug_level = params.get('debug_level', 0)
     BaseCache.__init__(self)

   def initCacheStorage(self):
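
The constructor reads its settings from params; a hypothetical configuration
(the key names and defaults come from the diff, the values are examples):

    params = {
        'server': '127.0.0.1:11211\n127.0.0.1:11212',  # one memcached server per line
        'server_max_key_length': 250,                  # memcached's default key limit
        'server_max_value_length': 1024 * 1024,        # memcached's default value limit (1 MB)
        'debug_level': 0,
    }
    cache_plugin = DistributedRamCache(params)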
@@ -74,9 +75,12 @@ class DistributedRamCache(BaseCache):
     memcache_conn = connection_pool.get(thread_id, None)
     if memcache_conn is None:
       ## we don't have a memcache_conn for this thread
-      memcache_conn = memcache.Client(self._servers.split('\n'), debug=self._debugLevel)
+      memcache_conn = memcache.Client(self._servers.split('\n'),
+                                      debug=self._debug_level,
+                                      server_max_key_length=self._server_max_key_length,
+                                      server_max_value_length=self._server_max_value_length)
       connection_pool[thread_id] = memcache_conn
       return memcache_conn
     else:
       ## we have a memcache_conn for this thread
       return memcache_conn
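
The pool above keeps one connection per thread because python-memcached
Client objects are not thread-safe. The same idea in a standalone sketch,
with hypothetical names (get_connection is not part of the plugin):

    import thread
    import memcache

    connection_pool = {}  # maps thread id -> memcache.Client

    def get_connection(servers, debug_level=0):
        thread_id = thread.get_ident()
        memcache_conn = connection_pool.get(thread_id)
        if memcache_conn is None:
            # no connection for this thread yet: create and cache one
            memcache_conn = memcache.Client(servers.split('\n'), debug=debug_level)
            connection_pool[thread_id] = memcache_conn
        return memcache_conn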
@@ -84,24 +88,28 @@ class DistributedRamCache(BaseCache):
   def checkAndFixCacheId(self, cache_id, scope):
     ## memcached doesn't support namespaces (cache scopes) so to "emulate"
     ## such behaviour, we add the scope in front when constructing cache_id
-    cache_id = "%s.%s" %(scope, cache_id)
-    ## memcached will fail to store a cache_id longer than MEMCACHED_SERVER_MAX_KEY_LENGTH.
-    return cache_id[:MEMCACHED_SERVER_MAX_KEY_LENGTH]
+    cache_id = "%s.%s" %(scope, cache_id)
+    if self._server_max_key_length != 0:
+      ## memcached will fail to store a cache_id longer than the configured maximum key length.
+      return cache_id[:self._server_max_key_length]
+    return cache_id

   def get(self, cache_id, scope, default=None):
     cache_storage = self.getCacheStorage()
     cache_id = self.checkAndFixCacheId(cache_id, scope)
     cache_entry = cache_storage.get(cache_id)
-    self.markCacheHit()
-    return cache_entry
+    if cache_entry is not None:
+      # since some memcached-like products don't support expiration, we
+      # check it by ourselves.
+      if cache_entry.isExpired():
+        cache_storage.delete(cache_id)
+        return default
+    self.markCacheHit()
+    return cache_entry or default

-  def set(self, cache_id, scope, value, cache_duration= None, calculation_time=0):
+  def set(self, cache_id, scope, value, cache_duration=None, calculation_time=0):
     cache_storage = self.getCacheStorage()
     cache_id = self.checkAndFixCacheId(cache_id, scope)
     if cache_duration is None:
       ## what should the default cache_duration be when None is given?
       ## currently None means forever, so give it a big value of 24 hours
       cache_duration = 86400
     cache_entry = CacheEntry(value, cache_duration, calculation_time)
     cache_storage.set(cache_id, cache_entry, cache_duration)
     self.markCacheMiss()
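
The scope prefixing and truncation done by checkAndFixCacheId can be seen in
isolation; a small sketch with hypothetical values:

    server_max_key_length = 250
    scope = 'my_scope'
    cache_id = 'x' * 300                    # deliberately longer than the limit
    cache_id = "%s.%s" % (scope, cache_id)  # emulate namespaces via a scope prefix
    if server_max_key_length != 0:
        cache_id = cache_id[:server_max_key_length]  # 0 disables truncation
    assert len(cache_id) == 250
    assert cache_id.startswith('my_scope.')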
@@ -114,7 +122,7 @@ class DistributedRamCache(BaseCache):
     ## The workaround for this problem is to create a new connection for every cache access,
     ## but that is too much overhead, or to create a new connection when the cache is about to expire.
     ## This way we can catch memcached server failures. BTW: this hack is forced by the lack of functionality in python-memcached.
-    #self._cache = memcache.Client(self._servers.split('\n'), debug=self._debugLevel)
+    #self._cache = memcache.Client(self._servers.split('\n'), debug=self._debug_level)
     pass

   def delete(self, cache_id, scope):
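
A sketch of the reconnection workaround the comment above describes (the
plugin keeps the line commented out; refresh_connection is a hypothetical
helper, not part of the plugin):

    import memcache

    def refresh_connection(servers, debug_level=0):
        # per the rationale above: a fresh Client reconnects lazily, so a
        # dead memcached server surfaces on the next access instead of
        # hiding behind a stale pooled connection
        return memcache.Client(servers.split('\n'), debug=debug_level)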