Commit d06063cc authored by Linus Torvalds

Move FAULT_FLAG_xyz into handle_mm_fault() callers

This allows the callers to now pass down the full set of FAULT_FLAG_xyz
flags to handle_mm_fault().  All callers have been (mechanically)
converted to the new calling convention; there's almost certainly room
for architectures to clean up their code and then add FAULT_FLAG_RETRY
when that support is added.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 30c9f3a9
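
The conversion at each call site is mechanical: the old boolean write_access
argument becomes a FAULT_FLAG_xyz bitmask, with the write bit spelled
FAULT_FLAG_WRITE. A minimal sketch of the before/after shape of a typical
caller (illustrative only, not a hunk from this commit):

	/* Before: the fourth argument was a plain write-access boolean. */
	fault = handle_mm_fault(mm, vma, address, write);

	/* After: callers pass a flags word instead, leaving room for
	 * further bits (such as a future FAULT_FLAG_RETRY) alongside
	 * the write bit. */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);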
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
......
@@ -208,7 +208,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* than endlessly redo the fault.
*/
survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -133,7 +133,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
* fault.
*/
survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
* the fault.
*/
-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
......
@@ -196,7 +196,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
*/
addr = (address & PAGE_MASK);
set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write);
+	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -155,7 +155,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
*/
survive:
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
#ifdef DEBUG
printk("handle_mm_fault returns %d\n",fault);
#endif
......
@@ -232,7 +232,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* the fault.
*/
survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -102,7 +102,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -258,7 +258,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -202,7 +202,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
* fault.
*/
-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We hit a shared mapping outside of the file, or some
......
@@ -302,7 +302,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
* the fault.
*/
survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
}
ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
......
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
}
survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -352,7 +352,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
up_read(&mm->mmap_sem);
......
@@ -133,7 +133,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* the fault.
*/
survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -187,7 +187,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
* the fault.
*/
survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -241,7 +241,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -484,7 +484,7 @@ static void force_user_fault(unsigned long address, int write)
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
-	switch (handle_mm_fault(mm, vma, address, write)) {
+	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
......
@@ -398,7 +398,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
goto bad_area;
}
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -65,7 +65,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
do {
int fault;
-		fault = handle_mm_fault(mm, vma, address, is_write);
+		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
......
@@ -1113,7 +1113,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
......
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs)
* the fault.
*/
survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
......
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access);
+			unsigned long address, unsigned int flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
-			int write_access)
+			unsigned int flags)
{
/* should never happen if there's no MMU */
BUG();
......
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
-			ret = handle_mm_fault(mm, vma, start,
-					foll_flags & FOLL_WRITE);
+			/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+			ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
@@ -2958,13 +2959,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
* By the time we get here, we already hold the mm semaphore
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, int write_access)
+		unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
-	unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;
__set_current_state(TASK_RUNNING);
......
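
One subtlety in the __get_user_pages() hunk above: foll_flags & FOLL_WRITE is
passed straight through as the fault flags, which is only correct because
FOLL_WRITE and FAULT_FLAG_WRITE occupy the same bit (hence the "FOLL_WRITE
matches FAULT_FLAG_WRITE!" comment). A sketch of the relevant definitions, as
assumed for include/linux/mm.h of this era (values worth verifying against the
tree):

	#define FAULT_FLAG_WRITE	0x01	/* fault was a write access */
	#define FOLL_WRITE		0x01	/* check pte is writable */

If either value were ever changed independently, the masked pass-through would
silently stop requesting (or start mis-requesting) write faults.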