Commit 6e612ae0 authored by Dmitriy Vyukov

runtime: fix potential memory corruption

Reinforce the guarantee that MSpan_EnsureSwept actually ensures that the span is swept.
I have not observed crashes related to this, but I do not see why it can't crash as well.

LGTM=rsc
R=golang-codereviews
CC=golang-codereviews, khr, rsc
https://golang.org/cl/67990043
parent 69257d17
...@@ -1694,16 +1694,19 @@ runtime·MSpan_EnsureSwept(MSpan *s) ...@@ -1694,16 +1694,19 @@ runtime·MSpan_EnsureSwept(MSpan *s)
{ {
uint32 sg; uint32 sg;
// Caller must disable preemption.
// Otherwise when this function returns the span can become unswept again
// (if GC is triggered on another goroutine).
if(m->locks == 0 && m->mallocing == 0)
runtime·throw("MSpan_EnsureSwept: m is not locked");
sg = runtime·mheap.sweepgen; sg = runtime·mheap.sweepgen;
if(runtime·atomicload(&s->sweepgen) == sg) if(runtime·atomicload(&s->sweepgen) == sg)
return; return;
m->locks++;
if(runtime·cas(&s->sweepgen, sg-2, sg-1)) { if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
runtime·MSpan_Sweep(s); runtime·MSpan_Sweep(s);
m->locks--;
return; return;
} }
m->locks--;
// unfortunate condition, and we don't have efficient means to wait // unfortunate condition, and we don't have efficient means to wait
while(runtime·atomicload(&s->sweepgen) != sg) while(runtime·atomicload(&s->sweepgen) != sg)
runtime·osyield(); runtime·osyield();
......
...@@ -653,6 +653,7 @@ addspecial(void *p, Special *s) ...@@ -653,6 +653,7 @@ addspecial(void *p, Special *s)
// Ensure that the span is swept. // Ensure that the span is swept.
// GC accesses specials list w/o locks. And it's just much safer. // GC accesses specials list w/o locks. And it's just much safer.
m->locks++;
runtime·MSpan_EnsureSwept(span); runtime·MSpan_EnsureSwept(span);
offset = (uintptr)p - (span->start << PageShift); offset = (uintptr)p - (span->start << PageShift);
...@@ -665,6 +666,7 @@ addspecial(void *p, Special *s) ...@@ -665,6 +666,7 @@ addspecial(void *p, Special *s)
while((x = *t) != nil) { while((x = *t) != nil) {
if(offset == x->offset && kind == x->kind) { if(offset == x->offset && kind == x->kind) {
runtime·unlock(&span->specialLock); runtime·unlock(&span->specialLock);
m->locks--;
return false; // already exists return false; // already exists
} }
if(offset < x->offset || (offset == x->offset && kind < x->kind)) if(offset < x->offset || (offset == x->offset && kind < x->kind))
...@@ -676,6 +678,7 @@ addspecial(void *p, Special *s) ...@@ -676,6 +678,7 @@ addspecial(void *p, Special *s)
s->next = x; s->next = x;
*t = s; *t = s;
runtime·unlock(&span->specialLock); runtime·unlock(&span->specialLock);
m->locks--;
return true; return true;
} }
...@@ -695,6 +698,7 @@ removespecial(void *p, byte kind) ...@@ -695,6 +698,7 @@ removespecial(void *p, byte kind)
// Ensure that the span is swept. // Ensure that the span is swept.
// GC accesses specials list w/o locks. And it's just much safer. // GC accesses specials list w/o locks. And it's just much safer.
m->locks++;
runtime·MSpan_EnsureSwept(span); runtime·MSpan_EnsureSwept(span);
offset = (uintptr)p - (span->start << PageShift); offset = (uintptr)p - (span->start << PageShift);
...@@ -707,11 +711,13 @@ removespecial(void *p, byte kind) ...@@ -707,11 +711,13 @@ removespecial(void *p, byte kind)
if(offset == s->offset && kind == s->kind) { if(offset == s->offset && kind == s->kind) {
*t = s->next; *t = s->next;
runtime·unlock(&span->specialLock); runtime·unlock(&span->specialLock);
m->locks--;
return s; return s;
} }
t = &s->next; t = &s->next;
} }
runtime·unlock(&span->specialLock); runtime·unlock(&span->specialLock);
m->locks--;
return nil; return nil;
} }
...@@ -805,6 +811,8 @@ runtime·freeallspecials(MSpan *span, void *p, uintptr size) ...@@ -805,6 +811,8 @@ runtime·freeallspecials(MSpan *span, void *p, uintptr size)
Special *s, **t, *list; Special *s, **t, *list;
uintptr offset; uintptr offset;
if(span->sweepgen != runtime·mheap.sweepgen)
runtime·throw("runtime: freeallspecials: unswept span");
// first, collect all specials into the list; then, free them // first, collect all specials into the list; then, free them
// this is required to not cause deadlock between span->specialLock and proflock // this is required to not cause deadlock between span->specialLock and proflock
list = nil; list = nil;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment