Commit 156da51c by Julien Muchembled

mysql: do not full-scan for duplicates of big oids if deduplication is disabled

1 parent a63b45fe
...@@ -164,6 +164,13 @@ class MySQLDatabaseManager(DatabaseManager): ...@@ -164,6 +164,13 @@ class MySQLDatabaseManager(DatabaseManager):
" Minimal value must be %uk." " Minimal value must be %uk."
% (name, self._max_allowed_packet // 1024)) % (name, self._max_allowed_packet // 1024))
self._max_allowed_packet = int(value) self._max_allowed_packet = int(value)
try:
self._dedup = bool(query(
"SHOW INDEX FROM data WHERE key_name='hash'"))
except ProgrammingError as e:
if e.args[0] != NO_SUCH_TABLE:
raise
self._dedup = None
_connect = auto_reconnect(_tryConnect) _connect = auto_reconnect(_tryConnect)
...@@ -322,6 +329,9 @@ class MySQLDatabaseManager(DatabaseManager): ...@@ -322,6 +329,9 @@ class MySQLDatabaseManager(DatabaseManager):
for table, schema in schema_dict.iteritems(): for table, schema in schema_dict.iteritems():
q(schema % ('IF NOT EXISTS ' + table)) q(schema % ('IF NOT EXISTS ' + table))
if self._dedup is None:
self._dedup = dedup
self._uncommitted_data.update(q("SELECT data_id, count(*)" self._uncommitted_data.update(q("SELECT data_id, count(*)"
" FROM tobj WHERE data_id IS NOT NULL GROUP BY data_id")) " FROM tobj WHERE data_id IS NOT NULL GROUP BY data_id"))
...@@ -608,18 +618,19 @@ class MySQLDatabaseManager(DatabaseManager): ...@@ -608,18 +618,19 @@ class MySQLDatabaseManager(DatabaseManager):
if 0x1000000 <= len(data): # 16M (MEDIUMBLOB limit) if 0x1000000 <= len(data): # 16M (MEDIUMBLOB limit)
compression |= 0x80 compression |= 0x80
q = self.query q = self.query
for r, d in q("SELECT id, value FROM data" if self._dedup:
" WHERE hash='%s' AND compression=%s" for r, d in q("SELECT id, value FROM data"
% (checksum, compression)): " WHERE hash='%s' AND compression=%s"
i = 0 % (checksum, compression)):
for d in self._bigData(d): i = 0
j = i + len(d) for d in self._bigData(d):
if data[i:j] != d: j = i + len(d)
if data[i:j] != d:
raise IntegrityError(DUP_ENTRY)
i = j
if j != len(data):
raise IntegrityError(DUP_ENTRY) raise IntegrityError(DUP_ENTRY)
i = j return r
if j != len(data):
raise IntegrityError(DUP_ENTRY)
return r
i = 'NULL' i = 'NULL'
length = len(data) length = len(data)
for j in xrange(0, length, 0x800000): # 8M for j in xrange(0, length, 0x800000): # 8M
......
...@@ -60,9 +60,9 @@ class PCounterWithResolution(PCounter): ...@@ -60,9 +60,9 @@ class PCounterWithResolution(PCounter):
class Test(NEOThreadedTest): class Test(NEOThreadedTest):
@with_cluster() def testBasicStore(self, dedup=False):
def testBasicStore(self, cluster): with NEOCluster(dedup=dedup) as cluster:
if 1: cluster.start()
storage = cluster.getZODBStorage() storage = cluster.getZODBStorage()
storage.sync() storage.sync()
storage.app.max_reconnection_to_master = 0 storage.app.max_reconnection_to_master = 0
......
...@@ -37,6 +37,11 @@ class SSLTests(SSLMixin, test.Test): ...@@ -37,6 +37,11 @@ class SSLTests(SSLMixin, test.Test):
testStorageDataLock2 = None testStorageDataLock2 = None
testUndoConflictDuringStore = None testUndoConflictDuringStore = None
# With MySQL, this test is expensive.
# Let's check deduplication of big oids here.
def testBasicStore(self):
    # Override the inherited test to pass dedup=True, so that the
    # big-oid deduplication path is exercised in this (SSL) variant
    # rather than re-running the expensive test a second time elsewhere.
    super(SSLTests, self).testBasicStore(True)
def testAbortConnection(self, after_handshake=1): def testAbortConnection(self, after_handshake=1):
with self.getLoopbackConnection() as conn: with self.getLoopbackConnection() as conn:
conn.ask(Packets.Ping()) conn.ask(Packets.Ping())
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!