Commit d2c585d3 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] pagefault accounting fix

From: William Lee Irwin III <wli@holomorphy.com>

Our accounting of minor faults versus major faults is currently quite wrong.

To fix it up we need to propagate the actual fault type back to the
higher-level code.  Repurpose the currently-unused third arg to ->nopage
for this.
parent 282ed003
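For reference, the new convention for a handler is sketched below. This is an illustrative, out-of-tree example rather than code from the patch: my_dev_nopage() and my_dev_find_page() are made-up names. The third argument to ->nopage is now an int *; a handler checks it for NULL and stores VM_FAULT_MINOR through it (or VM_FAULT_MAJOR, if it had to do I/O), and do_no_page() passes &ret so the value propagates into the fault return code and hence into the minor/major fault accounting, as the hunks below show.

	/* Illustrative sketch only -- not part of this patch. */
	static struct page *my_dev_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
	{
		/* my_dev_find_page() is a hypothetical lookup helper */
		struct page *page = my_dev_find_page(vma, address);

		if (!page)
			return NOPAGE_SIGBUS;
		get_page(page);
		if (type)
			*type = VM_FAULT_MINOR;	/* page was already resident, no I/O */
		return page;
	}

A handler that has to read the page in from backing store would instead report VM_FAULT_MAJOR and bump the pgmajfault counter with inc_page_state(pgmajfault), as the filemap, shmem and ncpfs changes below do.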
@@ -420,7 +420,7 @@ transfer: no
 prototypes:
 	void (*open)(struct vm_area_struct*);
 	void (*close)(struct vm_area_struct*);
-	struct page *(*nopage)(struct vm_area_struct*, unsigned long, int);
+	struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);
 locking rules:
 	BKL	mmap_sem
......
...@@ -534,7 +534,7 @@ int is_hugepage_mem_enough(size_t size) ...@@ -534,7 +534,7 @@ int is_hugepage_mem_enough(size_t size)
* this far. * this far.
*/ */
static struct page *hugetlb_nopage(struct vm_area_struct *vma, static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int unused) unsigned long address, int *unused)
{ {
BUG(); BUG();
return NULL; return NULL;
......
@@ -60,10 +60,12 @@ extern struct page *ia32_shared_page[];
 extern unsigned long *ia32_gdt;
 struct page *
-ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int no_share)
+ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
 {
 	struct page *pg = ia32_shared_page[smp_processor_id()];
 	get_page(pg);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return pg;
 }
......
@@ -518,7 +518,7 @@ int is_hugepage_mem_enough(size_t size)
 	return 1;
 }
-static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int unused)
+static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int *unused)
 {
 	BUG();
 	return NULL;
......
@@ -921,7 +921,7 @@ int is_hugepage_mem_enough(size_t size)
  * this far.
  */
 static struct page *hugetlb_nopage(struct vm_area_struct *vma,
-				unsigned long address, int unused)
+				unsigned long address, int *unused)
 {
 	BUG();
 	return NULL;
......
@@ -504,7 +504,7 @@ int is_hugepage_mem_enough(size_t size)
  * this far.
  */
 static struct page *hugetlb_nopage(struct vm_area_struct *vma,
-				unsigned long address, int unused)
+				unsigned long address, int *unused)
 {
 	BUG();
 	return NULL;
......
@@ -13,7 +13,7 @@
 static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
 					     unsigned long address,
-					     int write_access)
+					     int *type)
 {
 	alpha_agp_info *agp = agp_bridge->dev_private_data;
 	dma_addr_t dma_addr;
@@ -30,6 +30,8 @@ static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
 	 */
 	page = virt_to_page(__va(pa));
 	get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
......
@@ -760,16 +760,16 @@ extern int DRM(fasync)(int fd, struct file *filp, int on);
 				/* Mapping support (drm_vm.h) */
 extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
 				   unsigned long address,
-				   int write_access);
+				   int *type);
 extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
 				       unsigned long address,
-				       int write_access);
+				       int *type);
 extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
 				       unsigned long address,
-				       int write_access);
+				       int *type);
 extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
 				      unsigned long address,
-				      int write_access);
+				      int *type);
 extern void DRM(vm_open)(struct vm_area_struct *vma);
 extern void DRM(vm_close)(struct vm_area_struct *vma);
 extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
......
@@ -76,7 +76,7 @@ struct vm_operations_struct DRM(vm_sg_ops) = {
  */
 struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
 			    unsigned long address,
-			    int write_access)
+			    int *type)
 {
 #if __REALLY_HAVE_AGP
 	drm_file_t *priv = vma->vm_file->private_data;
@@ -133,6 +133,8 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
 			  baddr, __va(agpmem->memory->memory[offset]), offset,
 			  atomic_read(&page->count));
+		if (type)
+			*type = VM_FAULT_MINOR;
 		return page;
 	}
 vm_nopage_error:
@@ -154,7 +156,7 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
  */
 struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
 				unsigned long address,
-				int write_access)
+				int *type)
 {
 	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
 	unsigned long offset;
@@ -170,6 +172,8 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
 	if (!page)
 		return NOPAGE_OOM;
 	get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	DRM_DEBUG("shm_nopage 0x%lx\n", address);
 	return page;
@@ -268,7 +272,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
  */
 struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
 				unsigned long address,
-				int write_access)
+				int *type)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->dev;
@@ -287,6 +291,8 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
 			     (offset & (~PAGE_MASK))));
 	get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
 	return page;
@@ -304,7 +310,7 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
  */
 struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
 			       unsigned long address,
-			       int write_access)
+			       int *type)
 {
 	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
 	drm_file_t *priv = vma->vm_file->private_data;
@@ -325,6 +331,8 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
......
@@ -187,7 +187,7 @@ void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long
 /* nopage() handler for mmap access */
 static struct page*
-dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int write_access)
+dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
 {
 	unsigned long offset;
 	unsigned long kernel_virt_addr;
@@ -202,6 +202,8 @@ dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int wri
 	     (address > (unsigned long) area->vm_start + (PAGE_SIZE * dma->n_pages)) )
 		goto out;
+	if (type)
+		*type = VM_FAULT_MINOR;
 	offset = address - area->vm_start;
 	kernel_virt_addr = (unsigned long) dma->kvirt + offset;
 	ret = vmalloc_to_page((void*) kernel_virt_addr);
......
@@ -1078,7 +1078,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
  */
 static struct page*
 videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr,
-		   int write_access)
+		   int *type)
 {
 	struct page *page;
@@ -1090,6 +1090,8 @@ videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr,
 	if (!page)
 		return NOPAGE_OOM;
 	clear_user_page(page_address(page), vaddr, page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
......
@@ -1118,7 +1118,7 @@ sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
 }
 static struct page *
-sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int unused)
+sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
 {
 	Sg_fd *sfp;
 	struct page *page = NOPAGE_SIGBUS;
@@ -1158,6 +1158,8 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int unused)
 		page = virt_to_page(page_ptr);
 		get_page(page);	/* increment page count */
 	}
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
......
@@ -26,7 +26,7 @@
  * Fill in the supplied page for mmap
  */
 static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
-				     unsigned long address, int write_access)
+				     unsigned long address, int *type)
 {
 	struct file *file = area->vm_file;
 	struct dentry *dentry = file->f_dentry;
@@ -85,6 +85,15 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
 		memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
 	flush_dcache_page(page);
 	kunmap(page);
+	/*
+	 * If I understand ncp_read_kernel() properly, the above always
+	 * fetches from the network, here the analogue of disk.
+	 * -- wli
+	 */
+	if (type)
+		*type = VM_FAULT_MAJOR;
+	inc_page_state(pgmajfault);
 	return page;
 }
......
@@ -143,7 +143,7 @@ extern pgprot_t protection_map[16];
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
-	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
+	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
 	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
 };
@@ -407,7 +407,7 @@ static inline int page_mapped(struct page *page)
 extern void show_free_areas(void);
 struct page *shmem_nopage(struct vm_area_struct * vma,
-			unsigned long address, int unused);
+			unsigned long address, int *type);
 struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
 void shmem_lock(struct file * file, int lock);
 int shmem_zero_setup(struct vm_area_struct *);
@@ -565,7 +565,7 @@ extern unsigned long page_unuse(struct page *);
 extern void truncate_inode_pages(struct address_space *, loff_t);
 /* generic vm_area_ops exported for stackable file systems */
-extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
+struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
......
@@ -1323,8 +1323,6 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
  * either stopped or zombied. In the zombied case the task won't get
  * reaped till shortly after the call to getrusage(), in both cases the
  * task being examined is in a frozen state so the counters won't change.
- *
- * FIXME! Get the fault counts properly!
  */
 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
 {
......
@@ -990,7 +990,7 @@ static int page_cache_read(struct file * file, unsigned long offset)
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  */
-struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
+struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int *type)
 {
 	int error;
 	struct file *file = area->vm_file;
@@ -999,7 +999,7 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
 	struct inode *inode = mapping->host;
 	struct page *page;
 	unsigned long size, pgoff, endoff;
-	int did_readaround = 0;
+	int did_readaround = 0, majmin = VM_FAULT_MINOR;
 	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
 	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
@@ -1048,6 +1048,14 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
 		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
 			goto no_cached_page;
+		/*
+		 * To keep the pgmajfault counter straight, we need to
+		 * check did_readaround, as this is an inner loop.
+		 */
+		if (!did_readaround) {
+			majmin = VM_FAULT_MAJOR;
+			inc_page_state(pgmajfault);
+		}
 		did_readaround = 1;
 		do_page_cache_readahead(mapping, file,
 				pgoff & ~(MMAP_READAROUND-1), MMAP_READAROUND);
@@ -1069,6 +1077,8 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
+	if (type)
+		*type = majmin;
 	return page;
 outside_data_content:
@@ -1104,7 +1114,10 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
 	return NULL;
 page_not_uptodate:
-	inc_page_state(pgmajfault);
+	if (!did_readaround) {
+		majmin = VM_FAULT_MAJOR;
+		inc_page_state(pgmajfault);
+	}
 	lock_page(page);
 	/* Did it get unhashed while we waited for it? */
......
@@ -1400,7 +1400,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t entry;
 	struct pte_chain *pte_chain;
 	int sequence = 0;
-	int ret;
+	int ret = VM_FAULT_MINOR;
 	if (!vma->vm_ops || !vma->vm_ops->nopage)
 		return do_anonymous_page(mm, vma, page_table,
@@ -1414,7 +1414,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
-	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
+	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
 	/* no page was available -- either SIGBUS or OOM */
 	if (new_page == NOPAGE_SIGBUS)
@@ -1483,14 +1483,12 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap(page_table);
 		page_cache_release(new_page);
 		spin_unlock(&mm->page_table_lock);
-		ret = VM_FAULT_MINOR;
 		goto out;
 	}
 	/* no need to invalidate: a not-present page shouldn't be cached */
 	update_mmu_cache(vma, address, entry);
 	spin_unlock(&mm->page_table_lock);
-	ret = VM_FAULT_MAJOR;
 	goto out;
 oom:
 	ret = VM_FAULT_OOM;
......
@@ -71,7 +71,7 @@ enum sgp_type {
 };
 static int shmem_getpage(struct inode *inode, unsigned long idx,
-			 struct page **pagep, enum sgp_type sgp);
+			 struct page **pagep, enum sgp_type sgp, int *type);
 static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
 {
@@ -540,7 +540,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 		if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
 			(void) shmem_getpage(inode,
 				attr->ia_size>>PAGE_CACHE_SHIFT,
-				&page, SGP_READ);
+				&page, SGP_READ, NULL);
 		}
 		/*
 		 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
@@ -765,7 +765,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache
  */
-static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp)
+static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -774,7 +774,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
 	struct page *swappage;
 	swp_entry_t *entry;
 	swp_entry_t swap;
-	int error;
+	int error, majmin = VM_FAULT_MINOR;
 	if (idx >= SHMEM_MAX_INDEX)
 		return -EFBIG;
@@ -811,6 +811,10 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
 		if (!swappage) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
+			/* here we actually do the io */
+			if (majmin == VM_FAULT_MINOR && type)
+				inc_page_state(pgmajfault);
+			majmin = VM_FAULT_MAJOR;
 			swapin_readahead(swap);
 			swappage = read_swap_cache_async(swap);
 			if (!swappage) {
@@ -959,6 +963,8 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
 		} else
 			*pagep = ZERO_PAGE(0);
 	}
+	if (type)
+		*type = majmin;
 	return 0;
 failed:
@@ -969,7 +975,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **p
 	return error;
 }
-struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
+struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
 {
 	struct inode *inode = vma->vm_file->f_dentry->d_inode;
 	struct page *page = NULL;
@@ -980,7 +986,7 @@ struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int
 	idx += vma->vm_pgoff;
 	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-	error = shmem_getpage(inode, idx, &page, SGP_CACHE);
+	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
 	if (error)
 		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
@@ -1007,7 +1013,7 @@ static int shmem_populate(struct vm_area_struct *vma,
 		/*
 		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
 		 */
-		err = shmem_getpage(inode, pgoff, &page, sgp);
+		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
 		if (err)
 			return err;
 		if (page) {
@@ -1157,7 +1163,7 @@ static int
 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	return shmem_getpage(inode, page->index, &page, SGP_WRITE);
+	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
 }
 static ssize_t
@@ -1214,7 +1220,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
 		 * But it still may be a good idea to prefault below.
 		 */
-		err = shmem_getpage(inode, index, &page, SGP_WRITE);
+		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
 		if (err)
 			break;
@@ -1296,7 +1302,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 			break;
 		}
-		desc->error = shmem_getpage(inode, index, &page, SGP_READ);
+		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
 		if (desc->error) {
 			if (desc->error == -EINVAL)
 				desc->error = 0;
@@ -1552,7 +1558,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 		iput(inode);
 		return -ENOMEM;
 	}
-	error = shmem_getpage(inode, 0, &page, SGP_WRITE);
+	error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
 	if (error) {
 		vm_unacct_memory(VM_ACCT(1));
 		iput(inode);
@@ -1590,7 +1596,7 @@ static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
 static int shmem_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 {
 	struct page *page = NULL;
-	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ);
+	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
 	if (res)
 		return res;
 	res = vfs_readlink(dentry, buffer, buflen, kmap(page));
@@ -1603,7 +1609,7 @@ static int shmem_readlink(struct dentry *dentry, char __user *buffer, int buflen
 static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct page *page = NULL;
-	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ);
+	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
 	if (res)
 		return res;
 	res = vfs_follow_link(nd, kmap(page));
......
@@ -2779,7 +2779,7 @@ unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
 	return mask;
 }
-static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area, unsigned long address, int no_share)
+static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area, unsigned long address, int *type)
 {
 	snd_pcm_substream_t *substream = (snd_pcm_substream_t *)area->vm_private_data;
 	snd_pcm_runtime_t *runtime;
@@ -2791,6 +2791,8 @@ static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area, uns
 	page = virt_to_page(runtime->status);
 	if (!PageReserved(page))
 		get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
@@ -2817,7 +2819,7 @@ int snd_pcm_mmap_status(snd_pcm_substream_t *substream, struct file *file,
 	return 0;
 }
-static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area, unsigned long address, int no_share)
+static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area, unsigned long address, int *type)
 {
 	snd_pcm_substream_t *substream = (snd_pcm_substream_t *)area->vm_private_data;
 	snd_pcm_runtime_t *runtime;
@@ -2829,6 +2831,8 @@ static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area, un
 	page = virt_to_page(runtime->control);
 	if (!PageReserved(page))
 		get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
@@ -2867,7 +2871,7 @@ static void snd_pcm_mmap_data_close(struct vm_area_struct *area)
 	atomic_dec(&substream->runtime->mmap_count);
 }
-static struct page * snd_pcm_mmap_data_nopage(struct vm_area_struct *area, unsigned long address, int no_share)
+static struct page * snd_pcm_mmap_data_nopage(struct vm_area_struct *area, unsigned long address, int *type)
 {
 	snd_pcm_substream_t *substream = (snd_pcm_substream_t *)area->vm_private_data;
 	snd_pcm_runtime_t *runtime;
@@ -2895,6 +2899,8 @@ static struct page * snd_pcm_mmap_data_nopage(struct vm_area_struct *area, unsig
 	}
 	if (!PageReserved(page))
 		get_page(page);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return page;
 }
......
@@ -989,7 +989,7 @@ static int emu10k1_audio_ioctl(struct inode *inode, struct file *file, unsigned
 	return 0;
 }
-static struct page *emu10k1_mm_nopage (struct vm_area_struct * vma, unsigned long address, int write_access)
+static struct page *emu10k1_mm_nopage (struct vm_area_struct * vma, unsigned long address, int *type)
 {
 	struct emu10k1_wavedevice *wave_dev = vma->vm_private_data;
 	struct woinst *woinst = wave_dev->woinst;
@@ -1032,6 +1032,8 @@ static struct page *emu10k1_mm_nopage (struct vm_area_struct * vma, unsigned lon
 	get_page (dmapage);
 	DPD(3, "page: %#lx\n", (unsigned long) dmapage);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return dmapage;
 }
......
@@ -2116,7 +2116,7 @@ static void via_dsp_cleanup (struct via_info *card)
 static struct page * via_mm_nopage (struct vm_area_struct * vma,
-				    unsigned long address, int write_access)
+				    unsigned long address, int *type)
 {
 	struct via_info *card = vma->vm_private_data;
 	struct via_channel *chan = &card->ch_out;
@@ -2124,12 +2124,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
 	unsigned long pgoff;
 	int rd, wr;
-	DPRINTK ("ENTER, start %lXh, ofs %lXh, pgoff %ld, addr %lXh, wr %d\n",
+	DPRINTK ("ENTER, start %lXh, ofs %lXh, pgoff %ld, addr %lXh\n",
 		 vma->vm_start,
 		 address - vma->vm_start,
 		 (address - vma->vm_start) >> PAGE_SHIFT,
-		 address,
-		 write_access);
+		 address);
 	if (address > vma->vm_end) {
 		DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
@@ -2167,6 +2166,8 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
 	DPRINTK ("EXIT, returning page %p for cpuaddr %lXh\n",
 		 dmapage, (unsigned long) chan->pgtbl[pgoff].cpuaddr);
 	get_page (dmapage);
+	if (type)
+		*type = VM_FAULT_MINOR;
 	return dmapage;
 }
......