Commit ce6a27bc authored by Nicolas Delaby

Add support for new parameters of the memcached client

git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk@27580 20353a03-c40f-0410-a6d1-a30d3c3de9de
parent af7ccf6f
@@ -39,7 +39,6 @@ import zope.interface
 try:
   import memcache
-  MEMCACHED_SERVER_MAX_KEY_LENGTH = memcache.SERVER_MAX_KEY_LENGTH
 except ImportError:
   LOG('DistributedRamCache', 0, 'unable to import memcache')
@@ -55,7 +54,9 @@ class DistributedRamCache(BaseCache):
   def __init__(self, params={}):
     self._servers = params.get('server', '')
-    self._debugLevel = params.get('debugLevel', 0)
+    self._server_max_key_length = params.get('server_max_key_length', 250)
+    self._server_max_value_length = params.get('server_max_value_length', 1024*1024)
+    self._debug_level = params.get('debug_level', 0)
     BaseCache.__init__(self)

   def initCacheStorage(self):
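
For context, a minimal sketch (not part of the commit) of how the new keys could be supplied to the plugin. The key names come straight from the hunk above; instantiating DistributedRamCache directly like this is a simplification, since in ERP5 the params dict is normally assembled by the cache tool configuration.

# Hypothetical direct instantiation; values shown are the defaults used above.
params = {
  'server': '127.0.0.1:11211',           # newline-separated list of memcached servers
  'server_max_key_length': 250,          # 0 disables key truncation in checkAndFixCacheId
  'server_max_value_length': 1024*1024,  # largest value, in bytes, the client will send
  'debug_level': 0,
}
plugin = DistributedRamCache(params)
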
@@ -74,7 +75,10 @@ class DistributedRamCache(BaseCache):
     memcache_conn = connection_pool.get(thread_id, None)
     if memcache_conn is None:
       ## we don't have memcache_conn for this thread
-      memcache_conn = memcache.Client(self._servers.split('\n'), debug=self._debugLevel)
+      memcache_conn = memcache.Client(self._servers.split('\n'),
+                                      debug=self._debug_level,
+                                      server_max_key_length=self._server_max_key_length,
+                                      server_max_value_length=self._server_max_value_length)
       connection_pool[thread_id] = memcache_conn
       return memcache_conn
     else:
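
The keyword arguments forwarded here map onto python-memcached's Client constructor. A standalone sketch, assuming a python-memcached release recent enough to accept server_max_key_length and server_max_value_length (older releases only expose the module-level memcache.SERVER_MAX_KEY_LENGTH constant that the first hunk stops relying on):

import memcache

# Assumed: a memcached server listening on 127.0.0.1:11211 and a
# python-memcached version that accepts these keyword arguments.
client = memcache.Client(['127.0.0.1:11211'],
                         debug=0,
                         server_max_key_length=250,
                         server_max_value_length=1024*1024)
client.set('some_key', 'some_value')
print(client.get('some_key'))   # -> 'some_value' if the server is reachable
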
@@ -85,23 +89,27 @@ class DistributedRamCache(BaseCache):
     ## memcached doesn't support namespaces (cache scopes) so to "emulate"
     ## such behaviour when constructing cache_id we add the scope in front
     cache_id = "%s.%s" %(scope, cache_id)
-    ## memcached will fail to store cache_id longer than MEMCACHED_SERVER_MAX_KEY_LENGTH.
-    return cache_id[:MEMCACHED_SERVER_MAX_KEY_LENGTH]
+    if self._server_max_key_length != 0:
+      ## memcached will fail to store a cache_id longer than server_max_key_length.
+      return cache_id[:self._server_max_key_length]
+    return cache_id

   def get(self, cache_id, scope, default=None):
     cache_storage = self.getCacheStorage()
     cache_id = self.checkAndFixCacheId(cache_id, scope)
     cache_entry = cache_storage.get(cache_id)
+    if cache_entry is not None:
+      # since some memcached-like products don't support expiration, we
+      # check it ourselves.
+      if cache_entry.isExpired():
+        cache_storage.delete(cache_id)
+        return default
     self.markCacheHit()
-    return cache_entry
+    return cache_entry or default

-  def set(self, cache_id, scope, value, cache_duration= None, calculation_time=0):
+  def set(self, cache_id, scope, value, cache_duration=None, calculation_time=0):
     cache_storage = self.getCacheStorage()
     cache_id = self.checkAndFixCacheId(cache_id, scope)
+    if cache_duration is None:
+      ## what should the default cache_duration be when None is specified?
+      ## currently None means forever, so give it a big value of 24 hours
+      cache_duration = 86400
     cache_entry = CacheEntry(value, cache_duration, calculation_time)
     cache_storage.set(cache_id, cache_entry, cache_duration)
     self.markCacheMiss()
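
Condensed for illustration, the behaviour this hunk adds (scope prefixing with optional truncation, an explicit staleness check, and a 24-hour cap standing in for "forever") could be written as standalone helpers. The function names below are hypothetical and only restate what the methods above do:

DEFAULT_CACHE_DURATION = 86400  # 'forever' is capped at 24 hours by set()

def build_key(scope, cache_id, server_max_key_length=250):
  # memcached has no namespaces, so the scope is prefixed to the key;
  # a limit of 0 means "do not truncate".
  key = '%s.%s' % (scope, cache_id)
  if server_max_key_length != 0:
    return key[:server_max_key_length]
  return key

def get_entry(storage, key, default=None):
  # Some memcached-compatible servers ignore expiration times, so the
  # stored CacheEntry is asked whether it has expired and, if so, deleted.
  entry = storage.get(key)
  if entry is not None and entry.isExpired():
    storage.delete(key)
    return default
  return entry or default
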
@@ -114,7 +122,7 @@ class DistributedRamCache(BaseCache):
     ## The workaround for this problem is to create a new connection for every cache access,
     ## which is too much overhead, or to create a new connection when the cache is to be expired.
     ## This way we can catch memcached server failures. BTW: this hack is forced by the lack of functionality in python-memcached
-    #self._cache = memcache.Client(self._servers.split('\n'), debug=self._debugLevel)
+    #self._cache = memcache.Client(self._servers.split('\n'), debug=self._debug_level)
     pass

   def delete(self, cache_id, scope):
...
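
Finally, a hedged round-trip sketch of the plugin's set/get pair as it behaves after this commit. The key and scope names are made up, a reachable memcached server is assumed, and get() hands back the stored CacheEntry wrapper (or the default), not the bare value:

# Illustrative only: relies on the defaults added in __init__ above.
plugin = DistributedRamCache({'server': '127.0.0.1:11211'})
plugin.set('expensive_result', 'my_scope', 42)        # cache_duration=None -> 86400s
entry = plugin.get('expensive_result', 'my_scope')    # CacheEntry or None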