Commit 63f9ccb8 authored by Rafael J. Wysocki

Merge back earlier suspend/hibernation changes for v4.8.

parents 65c0554b 307c5971
......@@ -18,12 +18,11 @@ static inline void pm_set_vt_switch(int do_switch)
#endif
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern int pm_prepare_console(void);
extern void pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline int pm_prepare_console(void)
static inline void pm_prepare_console(void)
{
return 0;
}
static inline void pm_restore_console(void)
......
ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG
KASAN_SANITIZE_snapshot.o := n
obj-y += qos.o
obj-$(CONFIG_PM) += main.o
obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o
......
......@@ -126,17 +126,17 @@ static bool pm_vt_switch(void)
return ret;
}
int pm_prepare_console(void)
void pm_prepare_console(void)
{
if (!pm_vt_switch())
return 0;
return;
orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
if (orig_fgconsole < 0)
return 1;
return;
orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
return 0;
return;
}
void pm_restore_console(void)
......
......@@ -647,7 +647,7 @@ static void power_down(void)
*/
int hibernate(void)
{
int error;
int error, nr_calls = 0;
if (!hibernation_available()) {
pr_debug("PM: Hibernation not available.\n");
......@@ -662,9 +662,11 @@ int hibernate(void)
}
pm_prepare_console();
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (error)
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto Exit;
}
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
......@@ -714,7 +716,7 @@ int hibernate(void)
/* Don't bother checking whether freezer_test_done is true */
freezer_test_done = false;
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
pm_restore_console();
atomic_inc(&snapshot_device_available);
Unlock:
......@@ -740,7 +742,7 @@ int hibernate(void)
*/
static int software_resume(void)
{
int error;
int error, nr_calls = 0;
unsigned int flags;
/*
......@@ -827,9 +829,11 @@ static int software_resume(void)
}
pm_prepare_console();
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
if (error)
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto Close_Finish;
}
pr_debug("PM: Preparing processes for restore.\n");
error = freeze_processes();
......@@ -855,7 +859,7 @@ static int software_resume(void)
unlock_device_hotplug();
thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
pm_restore_console();
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
......
......@@ -38,12 +38,19 @@ int unregister_pm_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int pm_notifier_call_chain(unsigned long val)
int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
{
int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
int ret;
ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
nr_to_call, nr_calls);
return notifier_to_errno(ret);
}
int pm_notifier_call_chain(unsigned long val)
{
return __pm_notifier_call_chain(val, -1, NULL);
}
/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;
......
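The new __pm_notifier_call_chain() exposes the notifier infrastructure's partial-call parameters to the PM code: nr_to_call == -1 means "call every callback", and *nr_calls reports how many callbacks actually ran, so the matching POST notification can be limited to the callbacks that saw PREPARE. The nr_calls-- on the error paths above reflects the convention that a callback which fails PREPARE is expected to undo its own work. A minimal userspace sketch of that bookkeeping (plain C with illustrative names, not the kernel notifier API):

#include <stdio.h>

enum pm_event { PREPARE, POST };

typedef int (*notifier_fn)(enum pm_event event);

static int dev_a(enum pm_event e) { printf("dev_a %d\n", e); return 0; }
static int dev_b(enum pm_event e) { printf("dev_b %d\n", e); return e == PREPARE ? -1 : 0; }
static int dev_c(enum pm_event e) { printf("dev_c %d\n", e); return 0; }

static notifier_fn chain[] = { dev_a, dev_b, dev_c };
#define CHAIN_LEN (int)(sizeof(chain) / sizeof(chain[0]))

/* Call up to nr_to_call callbacks (-1 == all); report how many ran via nr_calls. */
static int call_chain(enum pm_event event, int nr_to_call, int *nr_calls)
{
	int ret = 0;

	for (int i = 0; i < CHAIN_LEN && nr_to_call; i++, nr_to_call--) {
		if (nr_calls)
			(*nr_calls)++;
		ret = chain[i](event);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	int nr_calls = 0;
	int error = call_chain(PREPARE, -1, &nr_calls);

	if (error)
		nr_calls--;	/* the failing callback undoes its own work */

	/* Only dev_a completed PREPARE, so only dev_a receives POST. */
	call_chain(POST, nr_calls, NULL);
	return error;
}

The hibernate(), software_resume(), suspend_prepare() and snapshot_open() hunks in this merge all follow the same pattern: record nr_calls on PREPARE, decrement it on error, and pass it to the POST call.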
......@@ -200,6 +200,8 @@ static inline void suspend_test_finish(const char *label) {}
#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
int *nr_calls);
extern int pm_notifier_call_chain(unsigned long val);
#endif
......
......@@ -89,6 +89,9 @@ static int try_to_freeze_tasks(bool user_only)
elapsed_msecs / 1000, elapsed_msecs % 1000,
todo - wq_busy, wq_busy);
if (wq_busy)
show_workqueue_state();
if (!wakeup) {
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
......
......@@ -74,6 +74,22 @@ void __init hibernate_image_size_init(void)
*/
struct pbe *restore_pblist;
/* struct linked_page is used to build chains of pages */
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
struct linked_page {
struct linked_page *next;
char data[LINKED_PAGE_DATA_SIZE];
} __packed;
/*
* List of "safe" pages (ie. pages that were not used by the image kernel
* before hibernation) that may be used as temporary storage for image kernel
* memory contents.
*/
static struct linked_page *safe_pages_list;
/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
......@@ -113,9 +129,21 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
return res;
}
static void *__get_safe_page(gfp_t gfp_mask)
{
if (safe_pages_list) {
void *ret = safe_pages_list;
safe_pages_list = safe_pages_list->next;
memset(ret, 0, PAGE_SIZE);
return ret;
}
return get_image_page(gfp_mask, PG_SAFE);
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
return (unsigned long)__get_safe_page(gfp_mask);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
......@@ -130,6 +158,14 @@ static struct page *alloc_image_page(gfp_t gfp_mask)
return page;
}
static void recycle_safe_page(void *page_address)
{
struct linked_page *lp = page_address;
lp->next = safe_pages_list;
safe_pages_list = lp;
}
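safe_pages_list is a simple LIFO of single pages threaded through their own first word: __get_safe_page() pops a recycled page when one is available and only falls back to get_image_page(..., PG_SAFE) when the list is empty, while recycle_safe_page() pushes a page back. A hedged userspace model of that pop/push pair (malloc stands in for the kernel's page allocator; the _model names are invented for this sketch):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Mirrors struct linked_page: the page's first word links it into the list. */
struct linked_page {
	struct linked_page *next;
	char data[PAGE_SIZE - sizeof(struct linked_page *)];
};

static struct linked_page *safe_pages_list;

/* Model of __get_safe_page(): reuse a recycled page, else allocate a fresh one. */
static void *get_safe_page_model(void)
{
	if (safe_pages_list) {
		struct linked_page *lp = safe_pages_list;

		safe_pages_list = lp->next;
		memset(lp, 0, PAGE_SIZE);
		return lp;
	}
	return calloc(1, PAGE_SIZE);	/* stand-in for get_image_page(..., PG_SAFE) */
}

/* Model of recycle_safe_page(): push the page back onto the list. */
static void recycle_safe_page_model(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

int main(void)
{
	void *page = get_safe_page_model();	/* list empty: allocates */

	recycle_safe_page_model(page);		/* page goes back on the list */
	page = get_safe_page_model();		/* reused and zeroed, no new allocation */
	free(page);
	return 0;
}

This is also why memory_bm_recycle() further down can tear a bitmap down by pushing its node pages onto the same list: they become immediately reusable by later __get_safe_page() calls without another trip through the page allocator.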
/**
* free_image_page - free page represented by @addr, allocated with
* get_image_page (page flags set by it must be cleared)
......@@ -150,15 +186,6 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
__free_page(page);
}
/* struct linked_page is used to build chains of pages */
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
struct linked_page {
struct linked_page *next;
char data[LINKED_PAGE_DATA_SIZE];
} __packed;
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
......@@ -208,7 +235,8 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
struct linked_page *lp;
lp = get_image_page(ca->gfp_mask, ca->safe_needed);
lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
get_image_page(ca->gfp_mask, PG_ANY);
if (!lp)
return NULL;
......@@ -832,6 +860,34 @@ struct nosave_region {
static LIST_HEAD(nosave_regions);
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
struct rtree_node *node;
list_for_each_entry(node, &zone->nodes, list)
recycle_safe_page(node->data);
list_for_each_entry(node, &zone->leaves, list)
recycle_safe_page(node->data);
}
static void memory_bm_recycle(struct memory_bitmap *bm)
{
struct mem_zone_bm_rtree *zone;
struct linked_page *p_list;
list_for_each_entry(zone, &bm->zones, list)
recycle_zone_bm_rtree(zone);
p_list = bm->p_list;
while (p_list) {
struct linked_page *lp = p_list;
p_list = lp->next;
recycle_safe_page(lp);
}
}
/**
* register_nosave_region - register a range of page frames the contents
* of which should not be saved during the suspend (to be used in the early
......@@ -1999,53 +2055,41 @@ int snapshot_read_next(struct snapshot_handle *handle)
return PAGE_SIZE;
}
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
struct memory_bitmap *src)
{
unsigned long pfn;
memory_bm_position_reset(src);
pfn = memory_bm_next_pfn(src);
while (pfn != BM_END_OF_MAP) {
memory_bm_set_bit(dst, pfn);
pfn = memory_bm_next_pfn(src);
}
}
/**
* mark_unsafe_pages - mark the pages that cannot be used for storing
* the image during resume, because they conflict with the pages that
* had been used before suspend
*/
static int mark_unsafe_pages(struct memory_bitmap *bm)
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
unsigned long pfn;
/* Clear page flags */
for_each_populated_zone(zone) {
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn))
swsusp_unset_page_free(pfn_to_page(pfn));
/* Clear the "free"/"unsafe" bit for all PFNs */
memory_bm_position_reset(free_pages_map);
pfn = memory_bm_next_pfn(free_pages_map);
while (pfn != BM_END_OF_MAP) {
memory_bm_clear_current(free_pages_map);
pfn = memory_bm_next_pfn(free_pages_map);
}
/* Mark pages that correspond to the "original" pfns as "unsafe" */
memory_bm_position_reset(bm);
do {
pfn = memory_bm_next_pfn(bm);
if (likely(pfn != BM_END_OF_MAP)) {
if (likely(pfn_valid(pfn)))
swsusp_set_page_free(pfn_to_page(pfn));
else
return -EFAULT;
}
} while (pfn != BM_END_OF_MAP);
/* Mark pages that correspond to the "original" PFNs as "unsafe" */
duplicate_memory_bitmap(free_pages_map, bm);
allocated_unsafe_pages = 0;
return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
unsigned long pfn;
memory_bm_position_reset(src);
pfn = memory_bm_next_pfn(src);
while (pfn != BM_END_OF_MAP) {
memory_bm_set_bit(dst, pfn);
pfn = memory_bm_next_pfn(src);
}
}
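With this rework the "unsafe" state no longer lives in per-page flags: free_pages_map is first emptied and then seeded, via duplicate_memory_bitmap(), with every PFN the image kernel had in use, so a set bit now doubles as "do not place restored data here". A hedged model using plain bit arrays (the array size and helper names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NR_PFNS 64

static bool free_pages_map[NR_PFNS];	/* reused here to mean "unsafe for restore" */
static bool image_pfns[NR_PFNS];	/* PFNs the image kernel had in use */

/* Model of duplicate_memory_bitmap(): copy every set bit from src into dst. */
static void duplicate_bitmap(bool *dst, const bool *src)
{
	for (int pfn = 0; pfn < NR_PFNS; pfn++)
		if (src[pfn])
			dst[pfn] = true;
}

/* Model of the reworked mark_unsafe_pages(). */
static void mark_unsafe(void)
{
	/* Clear the "free"/"unsafe" bit for all PFNs... */
	for (int pfn = 0; pfn < NR_PFNS; pfn++)
		free_pages_map[pfn] = false;

	/* ...then mark the "original" PFNs as unsafe. */
	duplicate_bitmap(free_pages_map, image_pfns);
}

int main(void)
{
	image_pfns[3] = image_pfns[17] = true;
	mark_unsafe();
	printf("pfn 3 unsafe: %d, pfn 4 unsafe: %d\n",
	       free_pages_map[3], free_pages_map[4]);
	return 0;
}

The invalid-PFN check this version drops from mark_unsafe_pages() reappears in unpack_orig_pfns() below, which now rejects PFNs that fail pfn_valid() before setting them in the bitmap.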
static int check_header(struct swsusp_info *info)
......@@ -2095,7 +2139,7 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
/* Extract and buffer page key for data page (s390 only). */
page_key_memorize(buf + j);
if (memory_bm_pfn_present(bm, buf[j]))
if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
memory_bm_set_bit(bm, buf[j]);
else
return -EFAULT;
......@@ -2104,11 +2148,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
return 0;
}
/* List of "safe" pages that may be used to store data loaded from the suspend
* image
*/
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
* should be restored atomically during the resume from disk, because the page
......@@ -2334,7 +2373,7 @@ static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
unsigned int nr_pages, nr_highmem;
struct linked_page *sp_list, *lp;
struct linked_page *lp;
int error;
/* If there is no highmem, the buffer will not be necessary */
......@@ -2342,9 +2381,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
buffer = NULL;
nr_highmem = count_highmem_image_pages(bm);
error = mark_unsafe_pages(bm);
if (error)
goto Free;
mark_unsafe_pages(bm);
error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
if (error)
......@@ -2362,9 +2399,9 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
* NOTE: This way we make sure there will be enough safe pages for the
* chain_alloc() in get_buffer(). It is a bit wasteful, but
* nr_copy_pages cannot be greater than 50% of the memory anyway.
*
* nr_copy_pages cannot be less than allocated_unsafe_pages too.
*/
sp_list = NULL;
/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
while (nr_pages > 0) {
......@@ -2373,12 +2410,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
error = -ENOMEM;
goto Free;
}
lp->next = sp_list;
sp_list = lp;
lp->next = safe_pages_list;
safe_pages_list = lp;
nr_pages--;
}
/* Preallocate memory for the image */
safe_pages_list = NULL;
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
while (nr_pages > 0) {
lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
......@@ -2396,12 +2432,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
swsusp_set_page_free(virt_to_page(lp));
nr_pages--;
}
/* Free the reserved safe pages so that chain_alloc() can use them */
while (sp_list) {
lp = sp_list->next;
free_image_page(sp_list, PG_UNSAFE_CLEAR);
sp_list = lp;
}
return 0;
Free:
......@@ -2491,6 +2521,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (error)
return error;
safe_pages_list = NULL;
error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
if (error)
return error;
......@@ -2546,9 +2578,9 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
/* Restore page key for data page (s390 only). */
page_key_write(handle->buffer);
page_key_free();
/* Free only if we have loaded the image entirely */
/* Do that only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
memory_bm_recycle(&orig_bm);
free_highmem_data();
}
}
......
......@@ -266,16 +266,18 @@ static int suspend_test(int level)
*/
static int suspend_prepare(suspend_state_t state)
{
int error;
int error, nr_calls = 0;
if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto Finish;
}
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
......@@ -286,7 +288,7 @@ static int suspend_prepare(suspend_state_t state)
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
pm_restore_console();
return error;
}
......
......@@ -47,7 +47,7 @@ atomic_t snapshot_device_available = ATOMIC_INIT(1);
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
int error;
int error, nr_calls = 0;
if (!hibernation_available())
return -EPERM;
......@@ -74,9 +74,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
data->free_bitmaps = false;
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error)
pm_notifier_call_chain(PM_POST_HIBERNATION);
__pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
} else {
/*
* Resuming. We may need to wait for the image device to
......@@ -86,13 +86,15 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = -1;
data->mode = O_WRONLY;
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
if (!error) {
error = create_basic_memory_bitmaps();
data->free_bitmaps = !error;
}
} else
nr_calls--;
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
}
if (error)
atomic_inc(&snapshot_device_available);
......
......@@ -4369,8 +4369,8 @@ static void show_pwq(struct pool_workqueue *pwq)
/**
* show_workqueue_state - dump workqueue state
*
* Called from a sysrq handler and prints out all busy workqueues and
* pools.
* Called from a sysrq handler or try_to_freeze_tasks() and prints out
* all busy workqueues and pools.
*/
void show_workqueue_state(void)
{
......