Commit 156da51c authored by Julien Muchembled's avatar Julien Muchembled

mysql: do not full-scan for duplicates of big oids if deduplication is disabled

parent a63b45fe
...@@ -164,6 +164,13 @@ class MySQLDatabaseManager(DatabaseManager): ...@@ -164,6 +164,13 @@ class MySQLDatabaseManager(DatabaseManager):
" Minimal value must be %uk." " Minimal value must be %uk."
% (name, self._max_allowed_packet // 1024)) % (name, self._max_allowed_packet // 1024))
self._max_allowed_packet = int(value) self._max_allowed_packet = int(value)
try:
self._dedup = bool(query(
"SHOW INDEX FROM data WHERE key_name='hash'"))
except ProgrammingError as e:
if e.args[0] != NO_SUCH_TABLE:
raise
self._dedup = None
_connect = auto_reconnect(_tryConnect) _connect = auto_reconnect(_tryConnect)
...@@ -322,6 +329,9 @@ class MySQLDatabaseManager(DatabaseManager): ...@@ -322,6 +329,9 @@ class MySQLDatabaseManager(DatabaseManager):
for table, schema in schema_dict.iteritems(): for table, schema in schema_dict.iteritems():
q(schema % ('IF NOT EXISTS ' + table)) q(schema % ('IF NOT EXISTS ' + table))
if self._dedup is None:
self._dedup = dedup
self._uncommitted_data.update(q("SELECT data_id, count(*)" self._uncommitted_data.update(q("SELECT data_id, count(*)"
" FROM tobj WHERE data_id IS NOT NULL GROUP BY data_id")) " FROM tobj WHERE data_id IS NOT NULL GROUP BY data_id"))
...@@ -608,6 +618,7 @@ class MySQLDatabaseManager(DatabaseManager): ...@@ -608,6 +618,7 @@ class MySQLDatabaseManager(DatabaseManager):
if 0x1000000 <= len(data): # 16M (MEDIUMBLOB limit) if 0x1000000 <= len(data): # 16M (MEDIUMBLOB limit)
compression |= 0x80 compression |= 0x80
q = self.query q = self.query
if self._dedup:
for r, d in q("SELECT id, value FROM data" for r, d in q("SELECT id, value FROM data"
" WHERE hash='%s' AND compression=%s" " WHERE hash='%s' AND compression=%s"
% (checksum, compression)): % (checksum, compression)):
......
...@@ -60,9 +60,9 @@ class PCounterWithResolution(PCounter): ...@@ -60,9 +60,9 @@ class PCounterWithResolution(PCounter):
class Test(NEOThreadedTest): class Test(NEOThreadedTest):
@with_cluster() def testBasicStore(self, dedup=False):
def testBasicStore(self, cluster): with NEOCluster(dedup=dedup) as cluster:
if 1: cluster.start()
storage = cluster.getZODBStorage() storage = cluster.getZODBStorage()
storage.sync() storage.sync()
storage.app.max_reconnection_to_master = 0 storage.app.max_reconnection_to_master = 0
......
...@@ -37,6 +37,11 @@ class SSLTests(SSLMixin, test.Test): ...@@ -37,6 +37,11 @@ class SSLTests(SSLMixin, test.Test):
testStorageDataLock2 = None testStorageDataLock2 = None
testUndoConflictDuringStore = None testUndoConflictDuringStore = None
# With MySQL, this test is expensive.
# Let's check deduplication of big oids here.
def testBasicStore(self):
super(SSLTests, self).testBasicStore(True)
def testAbortConnection(self, after_handshake=1): def testAbortConnection(self, after_handshake=1):
with self.getLoopbackConnection() as conn: with self.getLoopbackConnection() as conn:
conn.ask(Packets.Ping()) conn.ask(Packets.Ping())
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment