Commit 534b0a8b authored by Linus Torvalds

Merge tag 'core-debugobjects-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull debugobjects update from Thomas Gleixner:
 "A single commit for debug objects which fixes a pile of potential data
  races detected by KCSAN"

* tag 'core-debugobjects-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Fix various data races
parents b11c89a1 35fd7a63
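
The whole patch applies one idiom: counters such as obj_pool_free and obj_nr_tofree are only ever updated inside pool_lock critical sections, but they are also checked locklessly on fast paths, so the lockless reads are wrapped in READ_ONCE() and the locked updates in WRITE_ONCE() to stop the compiler from tearing, fusing, or refetching the accesses, and to tell KCSAN the remaining race is intentional. Below is a minimal userspace sketch of that idiom, not the kernel implementation: the macros only approximate what the kernel's READ_ONCE()/WRITE_ONCE() do for scalar types, and pool_free/pool_min_level are local stand-ins for the debugobjects globals.

#include <pthread.h>
#include <stdio.h>

/* Rough userspace approximations of the kernel macros (scalar case). */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_free;			/* only written under pool_lock */
static const int pool_min_level = 256;

/* Lockless fast-path check: racy but benign, so annotate the load. */
static int pool_needs_refill(void)
{
	return READ_ONCE(pool_free) < pool_min_level;
}

/*
 * Writer side: the update happens under the lock, but is marked so
 * concurrent lockless readers observe a single non-torn store.
 */
static void pool_put_one(void)
{
	pthread_mutex_lock(&pool_lock);
	WRITE_ONCE(pool_free, pool_free + 1);
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pool_put_one();
	printf("refill needed: %d\n", pool_needs_refill());
	return 0;
}
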
lib/debugobjects.c
@@ -132,14 +132,18 @@ static void fill_pool(void)
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (likely(obj_pool_free >= debug_objects_pool_min_level))
+	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
 		return;
 
 	/*
 	 * Reuse objs from the global free list; they will be reinitialized
 	 * when allocating.
+	 *
+	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+	 * sections.
 	 */
-	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		/*
 		 * Recheck with the lock held as the worker thread might have
@@ -148,9 +152,9 @@ static void fill_pool(void)
 		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
-			obj_nr_tofree--;
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 			hlist_add_head(&obj->node, &obj_pool);
-			obj_pool_free++;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -158,7 +162,7 @@ static void fill_pool(void)
 	if (unlikely(!obj_cache))
 		return;
 
-	while (obj_pool_free < debug_objects_pool_min_level) {
+	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 		struct debug_obj *new[ODEBUG_BATCH_SIZE];
 		int cnt;
 
@@ -174,7 +178,7 @@ static void fill_pool(void)
 		while (cnt) {
 			hlist_add_head(&new[--cnt]->node, &obj_pool);
 			debug_objects_allocated++;
-			obj_pool_free++;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
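
Note the structure of fill_pool() above: the outer loop checks obj_nr_tofree and obj_pool_free locklessly (hence READ_ONCE()), then takes pool_lock and rechecks both with plain loads, which are safe because every writer holds the lock; the comment added in the first hunk spells out exactly this pairing. Continuing the userspace sketch from above (same macros, lock, and pool_free counter, plus a local nr_tofree standing in for obj_nr_tofree):

static int nr_tofree;			/* only written under pool_lock */

static void refill_from_free_list(void)
{
	/* Lockless pre-check: both loads annotated with READ_ONCE(). */
	while (READ_ONCE(nr_tofree) && READ_ONCE(pool_free) < pool_min_level) {
		pthread_mutex_lock(&pool_lock);
		/*
		 * Recheck under the lock: another thread or the flush
		 * worker may have changed both counters meanwhile. Plain
		 * reads are fine here; only the stores stay marked for
		 * the benefit of the lockless readers.
		 */
		while (nr_tofree && pool_free < pool_min_level) {
			WRITE_ONCE(nr_tofree, nr_tofree - 1);
			WRITE_ONCE(pool_free, pool_free + 1);
		}
		pthread_mutex_unlock(&pool_lock);
	}
}
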
@@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 	obj = __alloc_object(&obj_pool);
 	if (obj) {
 		obj_pool_used++;
-		obj_pool_free--;
+		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 
 		/*
 		 * Looking ahead, allocate one batch of debug objects and
@@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 					  &percpu_pool->free_objs);
 			percpu_pool->obj_free++;
 			obj_pool_used++;
-			obj_pool_free--;
+			WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 		}
 	}
@@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work)
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
 			hlist_add_head(&obj->node, &obj_pool);
-			obj_pool_free++;
-			obj_nr_tofree--;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 		return;
@@ -324,7 +328,7 @@ static void free_obj_work(struct work_struct *work)
 	if (obj_nr_tofree) {
 		hlist_move_list(&obj_to_free, &tofree);
 		debug_objects_freed += obj_nr_tofree;
-		obj_nr_tofree = 0;
+		WRITE_ONCE(obj_nr_tofree, 0);
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
@@ -375,10 +379,10 @@ static void __free_object(struct debug_obj *obj)
 	obj_pool_used--;
 
 	if (work) {
-		obj_nr_tofree++;
+		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 		hlist_add_head(&obj->node, &obj_to_free);
 		if (lookahead_count) {
-			obj_nr_tofree += lookahead_count;
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
 			obj_pool_used -= lookahead_count;
 			while (lookahead_count) {
 				hlist_add_head(&objs[--lookahead_count]->node,
@@ -396,15 +400,15 @@ static void __free_object(struct debug_obj *obj)
 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 				obj = __alloc_object(&obj_pool);
 				hlist_add_head(&obj->node, &obj_to_free);
-				obj_pool_free--;
-				obj_nr_tofree++;
+				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
 		}
 	} else {
-		obj_pool_free++;
+		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		hlist_add_head(&obj->node, &obj_pool);
 		if (lookahead_count) {
-			obj_pool_free += lookahead_count;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
 			obj_pool_used -= lookahead_count;
 			while (lookahead_count) {
 				hlist_add_head(&objs[--lookahead_count]->node,
@@ -423,7 +427,7 @@ static void __free_object(struct debug_obj *obj)
 static void free_object(struct debug_obj *obj)
 {
 	__free_object(obj);
-	if (!obj_freeing && obj_nr_tofree) {
+	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 		WRITE_ONCE(obj_freeing, true);
 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 	}
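
free_object() shows a third variant: a doubly lockless check of obj_freeing and obj_nr_tofree that is allowed to be stale, because the worst outcome is either a slightly delayed flush or one extra no-op run of the worker; no object is ever lost, since it already sits on the tofree list. A sketch of the same idiom, continuing the example above; freeing_scheduled mirrors obj_freeing, and schedule_flush() is a hypothetical stand-in for schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY):

static int freeing_scheduled;		/* set/cleared with WRITE_ONCE() */

static void schedule_flush(void)
{
	/* Stand-in: would queue the deferred free worker. */
}

static void maybe_schedule_flush(void)
{
	/*
	 * Racy but benign: a stale read only delays or duplicates the
	 * kick; the objects themselves stay queued on the tofree list.
	 */
	if (!READ_ONCE(freeing_scheduled) && READ_ONCE(nr_tofree)) {
		WRITE_ONCE(freeing_scheduled, 1);
		schedule_flush();
	}
}
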
@@ -982,7 +986,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 		debug_objects_maxchecked = objs_checked;
 
 	/* Schedule work to actually kmem_cache_free() objects */
-	if (!obj_freeing && obj_nr_tofree) {
+	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 		WRITE_ONCE(obj_freeing, true);
 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 	}
@@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
+	seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-	seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
+	seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
 	return 0;
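
Finally, debug_stats_show() only needs each load to be a single non-torn read: READ_ONCE() gives per-access atomicity, not a mutually consistent snapshot of the counters, which is acceptable for a debugfs statistics dump. Continuing the sketch one last time:

/*
 * Tolerant snapshot: each counter is read once, atomically, but the
 * pair may be mutually inconsistent; that is fine for statistics.
 */
static void dump_stats(FILE *m)
{
	fprintf(m, "pool_free    :%d\n", READ_ONCE(pool_free));
	fprintf(m, "on_free_list :%d\n", READ_ONCE(nr_tofree));
}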