Commit 984a6083 authored by Waiman Long, committed by Andrew Morton

mm/kmemleak: prevent soft lockup in kmemleak_scan()'s object iteration loops

Commit 6edda04c ("mm/kmemleak: prevent soft lockup in first object
iteration loop of kmemleak_scan()") adds cond_resched() in the first
object iteration loop of kmemleak_scan().  However, it turns out that
the 2nd object iteration loop can still cause a soft lockup to happen
in some cases.  So add a cond_resched() call in the 2nd and 3rd loops
as well to prevent that and for completeness.
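For reference, the rescheduling pattern the patch applies looks roughly
like the condensed sketch below (abbreviated from the diff; the
per-object scanning work in each loop body is elided):

  /*
   * New helper: drop the RCU read lock around cond_resched() while
   * making sure the current object cannot go away.  If the object is
   * not already pinned, take a reference first; if that fails, tell
   * the caller to retry on the next object.
   */
  static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
  {
          if (!pinned && !get_object(object))
                  return false;           /* object is being freed, skip */

          rcu_read_unlock();
          cond_resched();
          rcu_read_lock();
          if (!pinned)
                  put_object(object);
          return true;
  }

  /* Each object iteration loop then reschedules every 64k objects: */
  rcu_read_lock();
  loop_cnt = 0;
  list_for_each_entry_rcu(object, &object_list, object_list) {
          if (!(++loop_cnt & 0xffff) &&
              !kmemleak_cond_resched(object, false))
                  loop_cnt--;     /* try again on the next object */
          /* ... per-object work ... */
  }
  rcu_read_unlock();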

Link: https://lkml.kernel.org/r/20221020175619.366317-1-longman@redhat.com
Fixes: 6edda04c ("mm/kmemleak: prevent soft lockup in first object iteration loop of kmemleak_scan()")
Signed-off-by: Waiman Long <longman@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e11c4e08
mm/kmemleak.c

@@ -1460,6 +1460,27 @@ static void scan_gray_list(void)
 	WARN_ON(!list_empty(&gray_list));
 }
 
+/*
+ * Conditionally call resched() in a object iteration loop while making sure
+ * that the given object won't go away without RCU read lock by performing a
+ * get_object() if !pinned.
+ *
+ * Return: false if can't do a cond_resched() due to get_object() failure
+ *	   true otherwise
+ */
+static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
+{
+	if (!pinned && !get_object(object))
+		return false;
+
+	rcu_read_unlock();
+	cond_resched();
+	rcu_read_lock();
+	if (!pinned)
+		put_object(object);
+	return true;
+}
+
 /*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
@@ -1471,7 +1492,7 @@ static void kmemleak_scan(void)
 	struct zone *zone;
 	int __maybe_unused i;
 	int new_leaks = 0;
-	int loop1_cnt = 0;
+	int loop_cnt = 0;
 
 	jiffies_last_scan = jiffies;
 
@@ -1480,7 +1501,6 @@ static void kmemleak_scan(void)
 	list_for_each_entry_rcu(object, &object_list, object_list) {
 		bool obj_pinned = false;
 
-		loop1_cnt++;
 		raw_spin_lock_irq(&object->lock);
 #ifdef DEBUG
 		/*
@@ -1514,24 +1534,11 @@ static void kmemleak_scan(void)
 		raw_spin_unlock_irq(&object->lock);
 
 		/*
-		 * Do a cond_resched() to avoid soft lockup every 64k objects.
-		 * Make sure a reference has been taken so that the object
-		 * won't go away without RCU read lock.
+		 * Do a cond_resched() every 64k objects to avoid soft lockup.
 		 */
-		if (!(loop1_cnt & 0xffff)) {
-			if (!obj_pinned && !get_object(object)) {
-				/* Try the next object instead */
-				loop1_cnt--;
-				continue;
-			}
-
-			rcu_read_unlock();
-			cond_resched();
-			rcu_read_lock();
-
-			if (!obj_pinned)
-				put_object(object);
-		}
+		if (!(++loop_cnt & 0xffff) &&
+		    !kmemleak_cond_resched(object, obj_pinned))
+			loop_cnt--;	/* Try again on next object */
 	}
 	rcu_read_unlock();
 
@@ -1598,7 +1605,15 @@ static void kmemleak_scan(void)
 	 * scan and color them gray until the next scan.
 	 */
 	rcu_read_lock();
+	loop_cnt = 0;
 	list_for_each_entry_rcu(object, &object_list, object_list) {
+		/*
+		 * Do a cond_resched() every 64k objects to avoid soft lockup.
+		 */
+		if (!(++loop_cnt & 0xffff) &&
+		    !kmemleak_cond_resched(object, false))
+			loop_cnt--;	/* Try again on next object */
+
 		/*
 		 * This is racy but we can save the overhead of lock/unlock
 		 * calls. The missed objects, if any, should be caught in
@@ -1632,7 +1647,15 @@ static void kmemleak_scan(void)
 	 * Scanning result reporting.
 	 */
 	rcu_read_lock();
+	loop_cnt = 0;
 	list_for_each_entry_rcu(object, &object_list, object_list) {
+		/*
+		 * Do a cond_resched() every 64k objects to avoid soft lockup.
+		 */
+		if (!(++loop_cnt & 0xffff) &&
+		    !kmemleak_cond_resched(object, false))
+			loop_cnt--;	/* Try again on next object */
+
 		/*
 		 * This is racy but we can save the overhead of lock/unlock
 		 * calls. The missed objects, if any, should be caught in