Commit bb65a764 authored by Ingo Molnar

Merge branch 'mce-ripvfix' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras into x86/mce

Merge memory fault handling fix from Tony Luck.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 92254d31 6751ed65
@@ -1186,6 +1186,7 @@ void mce_notify_process(void)
 {
 	unsigned long pfn;
 	struct mce_info *mi = mce_find_info();
+	int flags = MF_ACTION_REQUIRED;

 	if (!mi)
 		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
@@ -1200,8 +1201,9 @@ void mce_notify_process(void)
 	 * doomed. We still need to mark the page as poisoned and alert any
 	 * other users of the page.
 	 */
-	if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
-	    mi->restartable == 0) {
+	if (!mi->restartable)
+		flags |= MF_MUST_KILL;
+	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
 		pr_err("Memory error not recovered");
 		force_sig(SIGBUS, current);
 	}
...
@@ -1591,6 +1591,7 @@ void vmemmap_populate_print_last(void);
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
 	MF_ACTION_REQUIRED = 1 << 1,
+	MF_MUST_KILL = 1 << 2,
 };
 extern int memory_failure(unsigned long pfn, int trapno, int flags);
 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
...
@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int doit, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 		      int fail, struct page *page, unsigned long pfn,
 		      int flags)
 {
 	struct to_kill *tk, *next;

 	list_for_each_entry_safe (tk, next, to_kill, nd) {
-		if (doit) {
+		if (forcekill) {
 			/*
 			 * In case something went wrong with munmapping
 			 * make sure the process doesn't catch the
@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
 	int ret;
-	int kill = 1;
+	int kill = 1, forcekill;
 	struct page *hpage = compound_head(p);
 	struct page *ppage;
@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
 	mapping = page_mapping(hpage);
-	if (!PageDirty(hpage) && mapping &&
+	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
 	    mapping_cap_writeback_dirty(mapping)) {
 		if (page_mkclean(hpage)) {
 			SetPageDirty(hpage);
@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Now that the dirty bit has been propagated to the
 	 * struct page and all unmaps done we can decide if
 	 * killing is needed or not. Only kill when the page
-	 * was dirty, otherwise the tokill list is merely
+	 * was dirty or the process is not restartable,
+	 * otherwise the tokill list is merely
 	 * freed. When there was a problem unmapping earlier
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs(&tokill, !!PageDirty(ppage), trapno,
+	forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
+	kill_procs(&tokill, forcekill, trapno,
 		   ret != SWAP_SUCCESS, p, pfn, flags);

 	return ret;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment