nexedi / neoppod

Commit fa14157b
authored Oct 15, 2018 by Julien Muchembled
storage: fix storage leak when an oid is stored several times within a transaction
parent 50e7fe52
Showing 2 changed files with 77 additions and 12 deletions

neo/storage/transactions.py   +22 -12
neo/tests/threaded/test.py    +55 -0
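Note: the leak fixed here can be illustrated outside NEO with a toy reference-counted data store. Everything below is a simplified stand-in (ToyDataStore, hold, release and orphans are invented for this sketch and are not NEO's data-manager API): when the same oid is stored a second time within one transaction, e.g. a lockless write followed by a conflict-resolved store, the data id held by the first store must be released, otherwise its reference count never drops back to zero and the row stays behind.

# Toy illustration of the leak (not NEO code).

class ToyDataStore(object):

    def __init__(self):
        self._refcount = {}                 # data_id -> refcount

    def hold(self, data_id):
        # rows are deduplicated and kept alive by a reference count
        self._refcount[data_id] = self._refcount.get(data_id, 0) + 1
        return data_id

    def release(self, data_id):
        # drop one reference, remove the row when it reaches zero
        count = self._refcount[data_id] - 1
        if count:
            self._refcount[data_id] = count
        else:
            del self._refcount[data_id]

    def orphans(self):
        # rows still present although nothing references them anymore,
        # i.e. the role played by dm.getOrphanList() in the tests below
        return sorted(self._refcount)


def store(dm, store_dict, oid, data_id, fixed):
    if fixed and oid in store_dict:
        # what the patched code now does via _unstore: release the data
        # held by the previous store of the same oid
        dm.release(store_dict.pop(oid))
    store_dict[oid] = dm.hold(data_id)


for fixed in False, True:
    dm = ToyDataStore()
    store_dict = {}
    oid = '\x00' * 8
    store(dm, store_dict, oid, 'data#1', fixed)     # e.g. lockless first store
    store(dm, store_dict, oid, 'data#2', fixed)     # same oid stored again
    for data_id in store_dict.values():             # transaction ends
        dm.release(data_id)
    print fixed, dm.orphans()   # False -> ['data#1'] leaked, True -> []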
neo/storage/transactions.py
@@ -160,7 +160,7 @@ class TransactionManager(EventQueue):
         for txn, ttid in sorted((txn, ttid) for ttid, txn in
                                 self._transaction_dict.iteritems()):
             assert txn.lockless.issubset(txn.serial_dict), (
-                txn.lockless, txn.serial_dict)
+                ttid, txn.lockless, txn.serial_dict)
             for oid in txn.lockless:
                 partition = getPartition(oid)
                 if replicated.get(partition):
@@ -471,6 +471,7 @@ class TransactionManager(EventQueue):
             transaction = self._transaction_dict[ttid]
         except KeyError:
             raise NotRegisteredError
+        assert oid not in transaction.serial_dict
         locked = self.lockObject(ttid, serial, oid)
         transaction.serial_dict[oid] = serial
         if not locked:
@@ -486,6 +487,12 @@ class TransactionManager(EventQueue):
         except KeyError:
             raise NotRegisteredError
         locked = self.lockObject(ttid, serial, oid)
+        if oid in transaction.serial_dict: # initially/still lockless, or undo
+            # XXX: We'd like to do that before calling lockObject,
+            #      to release resources immediately (data, maybe lock)
+            #      in case of delay/conflict.
+            #      But keeping everything consistent is complicated.
+            self._unstore(transaction, oid)
         transaction.serial_dict[oid] = serial
         # store object
         if data is None:
@@ -519,19 +526,22 @@ class TransactionManager(EventQueue):
         except ConflictError, e:
             # Move the data back to the client for conflict resolution,
             # since the client may not have it anymore.
-            try:
-                data_id = transaction.store_dict.pop(oid)[1]
-            except KeyError: # check current
+            return serial, e.tid, self._unstore(transaction, oid)
+
+    def _unstore(self, transaction, oid):
+        try:
+            data_id = transaction.store_dict.pop(oid)[1]
+        except KeyError: # check current
+            data = None
+        else:
+            if data_id is None:
                 data = None
             else:
-                if data_id is None:
-                    data = None
-                else:
-                    dm = self._app.dm
-                    data = dm.loadData(data_id)
-                    dm.releaseData([data_id], True)
-            del transaction.serial_dict[oid]
-            return serial, e.tid, data
+                dm = self._app.dm
+                data = dm.loadData(data_id)
+                dm.releaseData([data_id], True)
+        del transaction.serial_dict[oid]
+        return data
 
     def abort(self, ttid, even_if_locked=False):
         """
neo/tests/threaded/test.py
@@ -162,6 +162,7 @@ class Test(NEOThreadedTest):
         undo.tpc_finish(txn)
         t.begin()
         self.assertEqual(ob.value, 5)
+        self.assertFalse(cluster.storage.dm.getOrphanList())
         return ob
 
     @with_cluster()
@@ -2165,6 +2166,7 @@ class Test(NEOThreadedTest):
             1: ['StoreTransaction'],
             3: [4, 'StoreTransaction'],
             })
+        self.assertFalse(s1.dm.getOrphanList())
 
     @with_cluster(replicas=1)
     def testNotifyReplicated2(self, cluster):
@@ -2361,6 +2363,59 @@ class Test(NEOThreadedTest):
             self.assertFalse(end)
         self.assertPartitionTable(cluster, 'UU|UU')
 
+    @with_cluster(partitions=2, storage_count=2)
+    def testUnstore(self, cluster):
+        """
+        Show that when resolving a conflict after a lockless write, the storage
+        can't easily discard the data of the previous store, as it would make
+        internal data inconsistent. This is currently protected by a assertion
+        when trying to notifying the master that the replication is finished.
+        """
+        t1, c1 = cluster.getTransaction()
+        r = c1.root()
+        for x in 'ab':
+            r[x] = PCounterWithResolution()
+        t1.commit()
+        cluster.stop(replicas=1)
+        cluster.start()
+        s0, s1 = cluster.sortStorageList()
+        t1, c1 = cluster.getTransaction()
+        r = c1.root()
+        r._p_changed = 1
+        r['b'].value += 1
+        with ConnectionFilter() as f:
+            delayReplication = f.delayAnswerFetchObjects()
+            cluster.neoctl.tweakPartitionTable()
+            self.tic()
+            t2, c2 = cluster.getTransaction()
+            x = c2.root()['b']
+            x.value += 2
+            t2.commit()
+            delayStore = f.delayAnswerStoreObject()
+            delayFinish = f.delayAskFinishTransaction()
+            x.value += 3
+            commit2 = self.newPausedThread(t2.commit)
+            def t2_b(*args, **kw):
+                yield 1
+                self.tic()
+                f.remove(delayReplication)
+                f.remove(delayStore)
+                self.tic()
+            def t1_resolve(*args, **kw):
+                yield 0
+                self.tic()
+                f.remove(delayFinish)
+            with self.thread_switcher((commit2,),
+                    (1, 0, 0, 1, t2_b, 0, t1_resolve),
+                    ('tpc_begin', 'tpc_begin', 0, 2, 2, 'StoreTransaction')) as end:
+                t1.commit()
+            commit2.join()
+        t1.begin()
+        self.assertEqual(c1.root()['b'].value, 6)
+        self.assertPartitionTable(cluster, 'UU|UU')
+        self.assertEqual(end, {0: [2, 2, 'StoreTransaction']})
+        self.assertFalse(s1.dm.getOrphanList())
+
     @with_cluster(storage_count=2, partitions=2)
     def testDeadlockAvoidanceBeforeInvolvingAnotherNode(self, cluster):
         t1, c1 = cluster.getTransaction()
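Note: the value of 6 asserted at the end of testUnstore relies on PCounterWithResolution merging concurrent increments (t1's +1 and t2's +2 then +3 on an initial value of 0). The real class is defined elsewhere in NEO's test code; the following is only a guess at its shape, assuming the usual ZODB pattern of resolving a write conflict by applying both deltas:

from persistent import Persistent

class PCounterWithResolution(Persistent):
    # Hypothetical stand-in: merge concurrent writes by summing both deltas.
    value = 0

    def _p_resolveConflict(self, old, saved, new):
        resolved = new.copy()
        resolved['value'] = (saved.get('value', 0) + new.get('value', 0)
                             - old.get('value', 0))
        return resolved

With that semantic, neither concurrent transaction loses its increment, and the final getOrphanList() check can then verify that the extra store of the same oid no longer leaves a leaked data row behind.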