Commit 1ef2f06b authored by David Hildenbrand, committed by Michael Ellerman

powerpc/pseries/cmm: Convert loaned_pages to an atomic_long_t

When switching to balloon compaction, we want to drop the cmm_lock and
completely rely on the balloon compaction list lock internally.
loaned_pages is currently protected under the cmm_lock.

Note: Right now cmm_alloc_pages() and cmm_free_pages() can be called
at the same time, e.g., via the thread and a concurrent OOM notifier.
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191031142933.10779-8-david@redhat.com
parent 7659f5d6
...@@ -73,7 +73,7 @@ MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. " ...@@ -73,7 +73,7 @@ MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); } #define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
static unsigned long loaned_pages; static atomic_long_t loaned_pages;
static unsigned long loaned_pages_target; static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages; static unsigned long oom_freed_pages;
...@@ -159,7 +159,7 @@ static long cmm_alloc_pages(long nr) ...@@ -159,7 +159,7 @@ static long cmm_alloc_pages(long nr)
} }
list_add(&page->lru, &cmm_page_list); list_add(&page->lru, &cmm_page_list);
loaned_pages++; atomic_long_inc(&loaned_pages);
adjust_managed_page_count(page, -1); adjust_managed_page_count(page, -1);
spin_unlock(&cmm_lock); spin_unlock(&cmm_lock);
nr--; nr--;
...@@ -189,7 +189,7 @@ static long cmm_free_pages(long nr) ...@@ -189,7 +189,7 @@ static long cmm_free_pages(long nr)
list_del(&page->lru); list_del(&page->lru);
adjust_managed_page_count(page, 1); adjust_managed_page_count(page, 1);
__free_page(page); __free_page(page);
loaned_pages--; atomic_long_dec(&loaned_pages);
nr--; nr--;
} }
spin_unlock(&cmm_lock); spin_unlock(&cmm_lock);
...@@ -214,7 +214,7 @@ static int cmm_oom_notify(struct notifier_block *self, ...@@ -214,7 +214,7 @@ static int cmm_oom_notify(struct notifier_block *self,
cmm_dbg("OOM processing started\n"); cmm_dbg("OOM processing started\n");
nr = cmm_free_pages(nr); nr = cmm_free_pages(nr);
loaned_pages_target = loaned_pages; loaned_pages_target = atomic_long_read(&loaned_pages);
*freed += KB2PAGES(oom_kb) - nr; *freed += KB2PAGES(oom_kb) - nr;
oom_freed_pages += KB2PAGES(oom_kb) - nr; oom_freed_pages += KB2PAGES(oom_kb) - nr;
cmm_dbg("OOM processing complete\n"); cmm_dbg("OOM processing complete\n");
...@@ -231,10 +231,11 @@ static int cmm_oom_notify(struct notifier_block *self, ...@@ -231,10 +231,11 @@ static int cmm_oom_notify(struct notifier_block *self,
**/ **/
static void cmm_get_mpp(void) static void cmm_get_mpp(void)
{ {
const long __loaned_pages = atomic_long_read(&loaned_pages);
const long total_pages = totalram_pages() + __loaned_pages;
int rc; int rc;
struct hvcall_mpp_data mpp_data; struct hvcall_mpp_data mpp_data;
signed long active_pages_target, page_loan_request, target; signed long active_pages_target, page_loan_request, target;
signed long total_pages = totalram_pages() + loaned_pages;
signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE; signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
rc = h_get_mpp(&mpp_data); rc = h_get_mpp(&mpp_data);
...@@ -243,7 +244,7 @@ static void cmm_get_mpp(void) ...@@ -243,7 +244,7 @@ static void cmm_get_mpp(void)
return; return;
page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE); page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
target = page_loan_request + (signed long)loaned_pages; target = page_loan_request + __loaned_pages;
if (target < 0 || total_pages < min_mem_pages) if (target < 0 || total_pages < min_mem_pages)
target = 0; target = 0;
...@@ -264,7 +265,7 @@ static void cmm_get_mpp(void) ...@@ -264,7 +265,7 @@ static void cmm_get_mpp(void)
loaned_pages_target = target; loaned_pages_target = target;
cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n", cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
page_loan_request, loaned_pages, loaned_pages_target, page_loan_request, __loaned_pages, loaned_pages_target,
oom_freed_pages, totalram_pages()); oom_freed_pages, totalram_pages());
} }
...@@ -282,6 +283,7 @@ static struct notifier_block cmm_oom_nb = { ...@@ -282,6 +283,7 @@ static struct notifier_block cmm_oom_nb = {
static int cmm_thread(void *dummy) static int cmm_thread(void *dummy)
{ {
unsigned long timeleft; unsigned long timeleft;
long __loaned_pages;
while (1) { while (1) {
timeleft = msleep_interruptible(delay * 1000); timeleft = msleep_interruptible(delay * 1000);
...@@ -312,11 +314,12 @@ static int cmm_thread(void *dummy) ...@@ -312,11 +314,12 @@ static int cmm_thread(void *dummy)
cmm_get_mpp(); cmm_get_mpp();
if (loaned_pages_target > loaned_pages) { __loaned_pages = atomic_long_read(&loaned_pages);
if (cmm_alloc_pages(loaned_pages_target - loaned_pages)) if (loaned_pages_target > __loaned_pages) {
loaned_pages_target = loaned_pages; if (cmm_alloc_pages(loaned_pages_target - __loaned_pages))
} else if (loaned_pages_target < loaned_pages) loaned_pages_target = __loaned_pages;
cmm_free_pages(loaned_pages - loaned_pages_target); } else if (loaned_pages_target < __loaned_pages)
cmm_free_pages(__loaned_pages - loaned_pages_target);
} }
return 0; return 0;
} }
...@@ -330,7 +333,7 @@ static int cmm_thread(void *dummy) ...@@ -330,7 +333,7 @@ static int cmm_thread(void *dummy)
} \ } \
static DEVICE_ATTR(name, 0444, show_##name, NULL) static DEVICE_ATTR(name, 0444, show_##name, NULL)
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages)); CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(atomic_long_read(&loaned_pages)));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target)); CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
static ssize_t show_oom_pages(struct device *dev, static ssize_t show_oom_pages(struct device *dev,
...@@ -433,7 +436,7 @@ static int cmm_reboot_notifier(struct notifier_block *nb, ...@@ -433,7 +436,7 @@ static int cmm_reboot_notifier(struct notifier_block *nb,
if (cmm_thread_ptr) if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr); kthread_stop(cmm_thread_ptr);
cmm_thread_ptr = NULL; cmm_thread_ptr = NULL;
cmm_free_pages(loaned_pages); cmm_free_pages(atomic_long_read(&loaned_pages));
} }
return NOTIFY_DONE; return NOTIFY_DONE;
} }
...@@ -540,7 +543,7 @@ static void cmm_exit(void) ...@@ -540,7 +543,7 @@ static void cmm_exit(void)
unregister_oom_notifier(&cmm_oom_nb); unregister_oom_notifier(&cmm_oom_nb);
unregister_reboot_notifier(&cmm_reboot_nb); unregister_reboot_notifier(&cmm_reboot_nb);
unregister_memory_notifier(&cmm_mem_nb); unregister_memory_notifier(&cmm_mem_nb);
cmm_free_pages(loaned_pages); cmm_free_pages(atomic_long_read(&loaned_pages));
cmm_unregister_sysfs(&cmm_dev); cmm_unregister_sysfs(&cmm_dev);
} }
...@@ -561,7 +564,7 @@ static int cmm_set_disable(const char *val, const struct kernel_param *kp) ...@@ -561,7 +564,7 @@ static int cmm_set_disable(const char *val, const struct kernel_param *kp)
if (cmm_thread_ptr) if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr); kthread_stop(cmm_thread_ptr);
cmm_thread_ptr = NULL; cmm_thread_ptr = NULL;
cmm_free_pages(loaned_pages); cmm_free_pages(atomic_long_read(&loaned_pages));
} else if (!disable && cmm_disabled) { } else if (!disable && cmm_disabled) {
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (IS_ERR(cmm_thread_ptr)) if (IS_ERR(cmm_thread_ptr))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment