Commit d9c996fd authored by Vincent Pelletier

MemcachedTool: Pool memcache.Client instances

Unlike other database adaptors, memcache adaptors can be shared among
transactions. So pool them in order to reduce the number of connections
established to memcached servers. And as the connections handle
thread-safety internally (by inheriting from threading.local), threads
can share the same connection instance.
Also, do not create a new connector each time we get disconnected, just
reconnect.
Also, memcached internally retries connections, so there is no need to
retry ourselves.
Also, do not log when failing to update or delete entries in the cache
server: this is a racy cache, so each read must be checked anyway, and
failed writes are just business as usual.
Also, document a bit more why we catch "any" exception happening during
finish, and specify the base exception class, following Python coding
best practices.
Also, use named constants for logging levels in the remaining log directives.
Also, display traceback when catching a connection issue during
__getitem__, as the original exception gets converted into a KeyError.
parent bdc4326e
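
For illustration, here is a minimal, standalone sketch of the pooling pattern this commit introduces. The real getClient() in the diff below wraps memcache.Client; the _FakeClient class and the server parameters used here are placeholders so the sketch runs without python-memcached or a memcached server:

```python
# Hypothetical stand-in for memcache.Client, used only so this sketch runs
# without python-memcached installed.
class _FakeClient(object):
    def __init__(self, server_list, **kw):
        self.server_list = server_list
        self.kw = kw

_client_pool = {}

def getClient(server_list, server_max_key_length, server_max_value_length):
    # Normalise the parameters into a hashable key: server order does not
    # matter, so sort the list to let equivalent configurations share a client.
    key = (
        tuple(sorted(server_list)),
        server_max_key_length,
        server_max_value_length,
    )
    try:
        return _client_pool[key]
    except KeyError:
        client = _client_pool[key] = _FakeClient(
            server_list,
            server_max_key_length=server_max_key_length,
            server_max_value_length=server_max_value_length,
        )
        return client

a = getClient(['127.0.0.1:11211'], 250, 1024 * 1024)
b = getClient(['127.0.0.1:11211'], 250, 1024 * 1024)
assert a is b  # identical parameters yield the same pooled instance
```

Because the client holds only connection state (no per-transaction snapshot), handing the same pooled instance to every transaction is safe, which is what makes this sharing worthwhile.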
@@ -62,12 +62,39 @@ if memcache is not None:
   # Real memcache tool
   from Shared.DC.ZRDB.TM import TM
   from Products.PythonScripts.Utility import allow_class
-  from zLOG import LOG
+  from zLOG import LOG, INFO
 
   MARKER = object()
   DELETE_ACTION = 0
   UPDATE_ACTION = 1
 
+  _client_pool = {}
+  def getClient(server_list, server_max_key_length, server_max_value_length):
+    """
+      Pool memcache.Client instances.
+      This is possible as there is no such thing as a database snapshot on
+      memcached connections (unlike, for example, mysql).
+      Also, memcached.Client instance are thread-safe (by inheriting from
+      threading.local), so we only need one instance per parameter set (and
+      we use few enough parameter variants to make this manageable).
+    """
+    key = (
+      tuple(sorted(server_list)),
+      server_max_key_length,
+      server_max_value_length,
+    )
+    try:
+      return _client_pool[key]
+    except KeyError:
+      client = _client_pool[key] = memcache.Client(
+        server_list,
+        pickleProtocol=-1, # use the highest available version
+        server_max_key_length=server_max_key_length,
+        server_max_value_length=server_max_value_length,
+      )
+      return client
+
   class MemcachedDict(TM):
     """
       Present memcached similarly to a dictionary (not all method are
@@ -108,14 +135,10 @@ if memcache is not None:
       self.expiration_time = expiration_time
       self.server_max_key_length = server_max_key_length
       self.server_max_value_length = server_max_value_length
-      self._initialiseConnection()
-
-    def _initialiseConnection(self):
-      self.memcached_connection = memcache.Client(
-        self.server_list,
-        pickleProtocol=-1, # use the highest available version
-        server_max_key_length=self.server_max_key_length,
-        server_max_value_length=self.server_max_value_length,
+      self.memcached_connection = getClient(
+        server_list,
+        server_max_key_length=server_max_key_length,
+        server_max_value_length=server_max_value_length,
       )
 
     def _finish(self, *ignored):
@@ -136,25 +159,17 @@ if memcache is not None:
         for key, action in self.scheduled_action_dict.iteritems():
           encoded_key = encodeKey(key)
           if action is UPDATE_ACTION:
-            succeed = self.memcached_connection.set(encoded_key,
-                                                    self.local_cache[key],
-                                                    expiration_time)
-            if not succeed:
-              self._initialiseConnection()
-              succeed = self.memcached_connection.set(encoded_key,
-                                                      self.local_cache[key],
-                                                      expiration_time)
-              if not succeed:
-                LOG('MemcacheTool', 0, 'set command to memcached server (%r) failed' % (self.server_list,))
+            self.memcached_connection.set(
+              encoded_key,
+              self.local_cache[key],
+              expiration_time,
+            )
           elif action is DELETE_ACTION:
-            succeed = self.memcached_connection.delete(encoded_key, 0)
-            if not succeed:
-              self._initialiseConnection()
-              succeed = self.memcached_connection.delete(encoded_key, 0)
-              if not succeed:
-                LOG('MemcacheTool', 0, 'delete command to memcached server (%r) failed' % (self.server_list,))
-      except:
-        LOG('MemcachedDict', 0, 'An exception occured during _finish', error=True)
+            self.memcached_connection.delete(encoded_key, 0)
+      except Exception:
+        # This is a cache. Failing to push data to server must be fine, as long as
+        # cleanup succeeds.
+        LOG('MemcachedDict', INFO, 'An exception occured during _finish', error=True)
       self.__cleanup()
 
     def _abort(self, *ignored):
@@ -179,15 +194,8 @@ if memcache is not None:
         try:
           result = self.memcached_connection.get(encoded_key)
         except memcache.Client.MemcachedConnectionError:
-          self._initialiseConnection()
-          try:
-            result = self.memcached_connection.get(encoded_key)
-          except memcache.Client.MemcachedConnectionError:
-            # maybe the server is not available at all / misconfigured
-            LOG('MemcacheTool', 0,
-                'get command to memcached server (%r) failed'
-                % (self.server_list,))
-            raise KeyError
+          LOG('MemcacheTool', INFO, 'get command to memcached server (%r) failed' % (self.server_list,), error=True)
+          raise KeyError
         self.local_cache[key] = result
       return result
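
The thread-safety claim above rests on memcache.Client inheriting from threading.local: attributes set on a shared instance are stored per thread. Below is a small, self-contained illustration of that mechanism; the names shared_state, worker and results are invented for the example and are not part of the commit:

```python
import threading

shared_state = threading.local()  # plays the role of a pooled memcache.Client
results = {}

def worker(name):
    # Writing an attribute on the shared object only affects this thread's view.
    shared_state.connection = 'socket-for-%s' % name
    results[name] = shared_state.connection

threads = [threading.Thread(target=worker, args=('t%d' % i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# Each thread saw only the value it set itself, even though all of them used
# the same shared_state object.
print(results)
```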